From 85fb7df011d7881989d7d0dc0e756f3dcc2ad254 Mon Sep 17 00:00:00 2001
From: Ingrid Luijx <ingrid.vanderlaan@wur.nl>
Date: Fri, 29 Apr 2016 12:28:27 +0000
Subject: [PATCH] Updates in analysis for producing graphs etc. for CTE2015
 website, plus some minor changes for Eric's SAM runs.

---
 da/analysis/amazon_mask.nc          | Bin 0 -> 533670 bytes
 da/analysis/expand_fluxes.py        |  16 ++-
 da/analysis/expand_molefractions.py |   6 +-
 da/analysis/siteseries.py           | 203 ++++++++++++++++++----------
 da/analysis/summarize_obs.py        | 110 +++++++++++----
 da/tools/initexit.py                |   6 +-
 da/tools/pipeline.py                |   5 +-
 7 files changed, 233 insertions(+), 113 deletions(-)
 create mode 100644 da/analysis/amazon_mask.nc

diff --git a/da/analysis/amazon_mask.nc b/da/analysis/amazon_mask.nc
new file mode 100644
index 0000000000000000000000000000000000000000..9555d12002481429a340dfafcd31faede0ec756c
GIT binary patch
literal 533670
[base85-encoded binary content of amazon_mask.nc omitted]
diff --git a/da/analysis/expand_fluxes.py b/da/analysis/expand_fluxes.py
index 91f2d70..3a5a0fb 100755
--- a/da/analysis/expand_fluxes.py
+++ b/da/analysis/expand_fluxes.py
@@ -1,8 +1,10 @@
 #!/usr/bin/env python
 # expand_fluxes.py
 import sys
-sys.path.append('../../')
 import os
+sys.path.append('../../')
+rootdir = os.getcwd().split('da/')[0]
+analysisdir = os.path.join(rootdir, 'da/analysis')
 from datetime import datetime, timedelta
 import logging

@@ -13,7 +15,7 @@ from da.analysis.tools_regions import globarea, state_to_grid
 from da.tools.general import create_dirs
 from da.analysis.tools_country import countryinfo # needed here
 from da.analysis.tools_transcom import transcommask, ExtendedTCRegions
-
+import netCDF4 as cdf
 import da.analysis.tools_transcom as tc
 import da.analysis.tools_country as ct

@@ -781,7 +783,14 @@ def save_weekly_avg_agg_data(dacycle, region_aggregate='olson'):
         dimname = 'tc_ext'
         dimregs = ncf.add_region_dim(type='tc_ext')
         xform = True
-
+
+    elif region_aggregate == "amazon":
+        regfile = cdf.Dataset(os.path.join(analysisdir,'amazon_mask.nc'))
+        regionmask = regfile.variables['regionmask'][:]
+        regfile.close()
+        dimname = 'amazon'
+        dimregs = ncf.add_dim(dimname, regionmask.max())
+        xform = False
elif region_aggregate == "country": @@ -1106,6 +1115,7 @@ if __name__ == "__main__": save_weekly_avg_agg_data(dacycle, region_aggregate='transcom') save_weekly_avg_agg_data(dacycle, region_aggregate='transcom_extended') save_weekly_avg_agg_data(dacycle, region_aggregate='country') + save_weekly_avg_agg_data(dacycle, region_aggregate='amazon') dacycle.advance_cycle_times() diff --git a/da/analysis/expand_molefractions.py b/da/analysis/expand_molefractions.py index 29002a3..681d458 100755 --- a/da/analysis/expand_molefractions.py +++ b/da/analysis/expand_molefractions.py @@ -261,10 +261,12 @@ def write_mole_fractions(dacycle): # Get existing file obs_nums to determine match to local obs_nums - if ncf_out.variables.has_key('id'): - file_obs_nums = ncf_out.get_variable('id') + if ncf_out.variables.has_key('merge_num'): + file_obs_nums = ncf_out.get_variable('merge_num') elif ncf_out.variables.has_key('obspack_num'): file_obs_nums = ncf_out.get_variable('obspack_num') + elif ncf_out.variables.has_key('id'): + file_obs_nums = ncf_out.get_variable('id') # Get all obs_nums related to this file, determine their indices in the local arrays diff --git a/da/analysis/siteseries.py b/da/analysis/siteseries.py index 789b8c0..1c7d0cc 100755 --- a/da/analysis/siteseries.py +++ b/da/analysis/siteseries.py @@ -27,7 +27,7 @@ import da.tools.io4 as io import logging import copy from da.analysis.summarize_obs import nice_lon, nice_lat, nice_alt -import Image +from PIL import Image import urllib2 import StringIO @@ -46,6 +46,8 @@ ts_yspan = ts_y2 - ts_y1 markersize = 2 fontsize = 16 +small_logos = ['EC','RUG','LBNL-ARM','NIES-MRI','NIWA','ECN'] + """ Three routines that provide loops around the number of sites. These also create the rundirs and such and make sure the figures that were created are stamped and saved properly @@ -78,6 +80,8 @@ def site_timeseries(analysisdir,option='final'): # # Create filename and extract site codes for the sites that had day/night data separated # + if not 'co2_poc' in sitefile: continue + filename = os.path.join(analysisdir, 'data_molefractions', sitefile) saveas = os.path.join(mrdir, sitefile[:-3] + '_timeseries') @@ -158,26 +162,32 @@ def site_timeseries(analysisdir,option='final'): with open("sitetable.html", "rt") as fin: for lineout in fin: lineout = lineout.replace('site_name',f.site_name) - lineout = lineout.replace('site_country',f.site_country) - lineout = lineout.replace('site_latitude',nice_lat(f.site_latitude,'html')) - lineout = lineout.replace('site_longitude',nice_lon(f.site_longitude,'html')) - lineout = lineout.replace('site_elevation',str(f.site_elevation)) - lineout = lineout.replace('intake_height',str(f.variables['intake_height'][:].max())) - if 'co2_hip' in sitefile: - lineout = lineout.replace('site_map','http://carbontracker.eu/development/data/ct_europe/timeseries_molefractions/co2_hip_map.jpg') - else: lineout = lineout.replace('site_map',str(f.site_map)) - lineout = lineout.replace('lab_abbr',f.lab_abbr) - lineout = lineout.replace('lab_name',f.lab_name) - lineout = lineout.replace('lab_country',f.lab_country) - if 'lab_provider' in lineout: - if 'Morgui' in f.provider_1_name: - lineout = lineout.replace('lab_provider','J.A. Morgui / M. 
Ramonet') - else: lineout = lineout.replace('lab_provider',f.provider_1_name) - if 'lab_url' in lineout: - if f.lab_url <> '': - lineout = lineout.replace('lab_url',f.lab_url) + if 'site_country' in lineout: + if 'site_country' in f.ncattrs(): + lineout = lineout.replace('site_country',f.site_country) + else: lineout = lineout.replace('site_country','Multiple') + if abs(f.site_latitude) > 9999: + lineout = lineout.replace('site_latitude','Variable') + lineout = lineout.replace('site_longitude','Variable') + else: + lineout = lineout.replace('site_latitude',nice_lat(f.site_latitude,'html')) + lineout = lineout.replace('site_longitude',nice_lon(f.site_longitude,'html')) + if abs(f.site_latitude) > 9999 and not 'shipboard' in sitefile: + lineout = lineout.replace('site_elevation','Variable') + lineout = lineout.replace('intake_height','Variable') + else: + lineout = lineout.replace('site_elevation',str(f.site_elevation)) + lineout = lineout.replace('intake_height',str(f.variables['intake_height'][:].max())) + lineout = lineout.replace('site_map',str(f.dataset_map)) + lineout = lineout.replace('lab_1_abbr',f.lab_1_abbr) + lineout = lineout.replace('lab_1_name',f.lab_1_name) + lineout = lineout.replace('lab_1_country',f.lab_1_country) + lineout = lineout.replace('lab_1_provider',f.provider_1_name) + if 'lab_1_url' in lineout: + if 'lab_1_url' in f.ncattrs(): + lineout = lineout.replace('lab_1_url',f.lab_1_url) else: lineout = '' - lineout = lineout.replace('lab_logo',f.lab_logo) + lineout = lineout.replace('lab_1_logo',f.lab_1_logo) lineout = lineout.replace('dataset_selection',f.dataset_selection_tag) lineout = lineout.replace('dataset_project',f.dataset_project) lineout = lineout.replace('dataset_calibration_scale',f.dataset_calibration_scale) @@ -218,9 +228,14 @@ def timehistograms_new(fig, infile, option='final'): if option == 'forecast': simulated = f.get_variable('modelsamplesmean_forecast') * molefac flags = f.get_variable('flag_forecast') + mdm_fig = mdm.compress(flags == 0).mean() - longsitestring = f.site_name + ', ' + f.site_country - location = nice_lat(f.site_latitude,'python') + ', ' + nice_lon(f.site_longitude,'python') + ', ' + nice_alt(f.site_elevation) + if 'site_country' in f.ncattrs(): + longsitestring = f.site_name + ', ' + f.site_country + else: longsitestring = f.site_name + if abs(f.site_latitude) > 9999: + location = 'Variable' + else: location = nice_lat(f.site_latitude,'python') + ', ' + nice_lon(f.site_longitude,'python') + ', ' + nice_alt(f.site_elevation) SDSInfo = {} for k in f.ncattrs(): @@ -286,10 +301,22 @@ def timehistograms_new(fig, infile, option='final'): # Calculate residual and chi squared values + if SDSInfo['site_code'] == 'POC': + sel_obs = sel_obs.compress(sel_fc > -9000) + sel_mdm = sel_mdm.compress(sel_fc > -9000) + sel_flags = sel_flags.compress(sel_fc > -9000) + sel_fc = sel_fc.compress(sel_fc > -9000) + #res = sel_fc - sel_obs if option == 'final': - res = sel_fc.compress(sel_flags != 2) - sel_obs.compress(sel_flags != 2) - chi = res / np.sqrt(sel_mdm.compress(sel_flags != 2)) + if mdm.mean() < 900: + res = sel_fc.compress(sel_flags == 0) - sel_obs.compress(sel_flags == 0) + chi = res / np.sqrt(sel_mdm.compress(sel_flags == 0)) + nr_obs = sel_obs.compress(sel_flags == 0).shape[0] + else: + res = sel_fc.compress(sel_flags != 2) - sel_obs.compress(sel_flags != 2) + chi = res / np.sqrt(sel_mdm.compress(sel_flags != 2)) + nr_obs = sel_obs.compress(sel_flags != 2).shape[0] elif option == 'forecast': res = sel_fc - sel_obs chi = res / np.sqrt(sel_hqhr) 
@@ -297,7 +324,7 @@ def timehistograms_new(fig, infile, option='final'): # Get a scaling factor for the x-axis range. Now we will include 5 standard deviations sc = res.std() - + print 'sc',sc # If there is too little data for a reasonable PDF, skip to the next value in the loop if res.shape[0] < 10: continue @@ -327,7 +354,7 @@ def timehistograms_new(fig, infile, option='final'): if chi.mean() != chi.mean() or mdm.mean() < 900: labs = [ '%.2f $\pm$ %.2f' % (res.mean(), res.std()) , \ - 'N = %d' % sel_obs.shape[0], \ + 'N = %d' % nr_obs, \ '%s$\chi^2$= %.2f'%(strX, (chi**2).mean()) ] else: @@ -364,29 +391,30 @@ def timehistograms_new(fig, infile, option='final'): matplotlib.rcParams.update({'font.size': 18}) ax.xaxis.set_ticks_position('bottom') + ax.xaxis.labelpad = -5 ax.set_xlabel('[%s]'%units,size=16) # # All custom titles and auxiliary info are placed onto the figure directly (fig.text) in relative coordinates # fig.text(0.5, 0.02, 'Simulated - Observed %s [%s]\nData from %s to %s' %(species,units,pydates[0].strftime('%d-%b-%Y'), pydates[-1].strftime('%d-%b-%Y')), horizontalalignment='center', fontsize=fontsize) - #fig.text(0.75,0.02,'Simulated - Observed\n CO$_2$ ($\mu$mol/mol)',horizontalalignment='center',fontsize=fontsize) - fig.text(0.5, 0.35, 'model-data\nmismatch:\n%.2f %s' % (sel_mdm[0], units), horizontalalignment='center', fontsize=fontsize, color='green') + fig.text(0.5, 0.35, 'model-data\nmismatch:\n%.2f %s' % (mdm_fig, units), horizontalalignment='center', fontsize=fontsize, color='green') + #fig.text(0.5, 0.35, 'model-data\nmismatch:\n%.2f %s' % (sel_mdm.mean(), units), horizontalalignment='center', fontsize=fontsize, color='green') fig.text(0.12, 0.75, 'NH Summer\n(Jun-Sep)', horizontalalignment='center', fontsize=fontsize) fig.text(0.62, 0.75, 'NH Winter\n(Nov-Apr)', horizontalalignment='center', fontsize=fontsize) # # Title # - plt.suptitle('%s [%s]\n%s, %s, %s ' % (longsitestring, location , SDSInfo['dataset_project'], SDSInfo['lab_name'], SDSInfo['lab_country'],), fontsize=fontsize + 4) + plt.suptitle('%s [%s]\n%s, %s, %s ' % (longsitestring, location , SDSInfo['dataset_project'], SDSInfo['lab_1_name'], SDSInfo['lab_1_country'],), fontsize=fontsize + 4) # # Add info to plot # - font0= FontProperties(size=15,style='italic',weight='bold') + font0= FontProperties(size=14,style='italic',weight='bold') txt='CarbonTracker Europe\n $\copyright$ Wageningen University' clr='green' - fig.text(0.82,0.01,txt,ha='left',font_properties = font0, color=clr ) + fig.text(0.8,0.01,txt,ha='left',font_properties = font0, color=clr ) #now = dt.datetime.today() #str1 = 'CTDAS2012\n' + now.strftime('%d/%m/%y') @@ -395,7 +423,7 @@ def timehistograms_new(fig, infile, option='final'): #fig.text(0.12,0.16,str1,fontsize=0.8*fontsize,color='0.75') try: - img = urllib2.urlopen(SDSInfo['lab_logo']).read() + img = urllib2.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read() except: logging.warning("No logo found for this program, continuing...") return fig @@ -411,7 +439,9 @@ def timehistograms_new(fig, infile, option='final'): # With newer (1.0) versions of matplotlib, you can # use the "zorder" kwarg to make the image overlay # the plot, rather than hide behind it... (e.g. 
zorder=10) - ax3 = fig.add_axes([0.425, 0.65, 0.15, 0.15 * height / width]) + if SDSInfo['lab_1_abbr'] in small_logos: scalingf = 2 + else: scalingf = 1 + ax3 = fig.add_axes([0.47-0.05*scalingf, 0.65, 0.15*scalingf, 0.15*scalingf * height / width]) ax3.axis('off') ax3.imshow(im, interpolation='None') @@ -438,8 +468,12 @@ def timevssite_new(fig, infile): simulated = f.get_variable('modelsamplesmean') * molefac flags = f.get_variable('flag_forecast') - longsitestring = f.site_name + ', ' + f.site_country - location = nice_lat(f.site_latitude,'python') + ', ' + nice_lon(f.site_longitude,'python') + ', ' + nice_alt(f.site_elevation) + if 'site_country' in f.ncattrs(): + longsitestring = f.site_name + ', ' + f.site_country + else: longsitestring = f.site_name + if abs(f.site_latitude) > 9999: + location = 'Variable' + else: location = nice_lat(f.site_latitude,'python') + ', ' + nice_lon(f.site_longitude,'python') + ', ' + nice_alt(f.site_elevation) SDSInfo = {} for k in f.ncattrs(): @@ -462,14 +496,10 @@ def timevssite_new(fig, infile): residual = simulated - obs + assimilated = (flags == 0.0) rejected = (flags == 2.0) notused = (flags == 99.0) - if notused.any(): - obslabel = 'Observed (not assimilated)' - else: - obslabel = 'Observed (assimilated)' - sd = pydates[0] ed = pydates[-1] @@ -493,8 +523,14 @@ def timevssite_new(fig, infile): markersize = 8 fontsize = 16 - p = ax1.plot(pydates, obs, marker='o', markeredgewidth=1, linestyle='None', markerfacecolor='None', \ - markeredgecolor='k', label=obslabel , markersize=markersize) + # + # Plot observations + # + if assimilated.any(): + p1 = ax1.plot(pydates.compress(assimilated), obs.compress(assimilated), marker='o', markeredgewidth=1, linestyle='None', markerfacecolor='None', markeredgecolor='k', label='Observed (assimilated)', markersize=markersize) + + if notused.any(): + p2 = ax1.plot(pydates.compress(notused), obs.compress(notused), marker='o', markeredgewidth=1, linestyle='None', markerfacecolor='None', markeredgecolor='tan', label='Observed (not assimilated)', markersize=markersize) # # Add the simulated values # @@ -504,8 +540,7 @@ def timevssite_new(fig, infile): # Add the rejected values if available # if rejected.any(): - r = ax1.plot(pydates.compress(rejected), simulated.compress(rejected), marker='s', markeredgewidth=1, markeredgecolor='r', \ - linestyle='None', label='Model Rejected (N=%d)' % len(pydates.compress(rejected)), markersize=markersize) + r = ax1.plot(pydates.compress(rejected), simulated.compress(rejected), marker='s', markeredgewidth=1, markerfacecolor='r', markeredgecolor='r', linestyle='None', label='Model Rejected (N=%d)' % len(pydates.compress(rejected)), markersize=markersize) # # Set up x axis labels @@ -559,7 +594,10 @@ def timevssite_new(fig, infile): # # Axes 2 # - residual = residual.compress(flags != 2) + if mdm.mean() < 900: + residual = residual.compress(flags == 0) + else: residual = residual.compress(flags != 2) + if SDSInfo['site_code'] == 'POC': residual = residual.compress(simulated > -9000) offset = 0.0 n, bins, patches = ax2.hist(residual, max(residual.shape[0] / 15, 15), normed=1, orientation='horizontal') p = plt.setp(patches, 'facecolor', 'tan' , 'edgecolor', 'tan', label='None', alpha=0.25) @@ -580,7 +618,7 @@ def timevssite_new(fig, infile): ax2.text(0.6, 0.01 + offset, labs[0], transform=ax2.transAxes, fontsize=1.1 * fontsize, horizontalalignment='center', color='k') offset += -0.05 - ax2.set_ylim(-4 * sc, 4 * sc) + ax2.set_ylim(-6 * sc, 6 * sc) ax2.spines['left'].set_position(('axes', 0.0)) 
ax2.spines['right'].set_color('none') @@ -607,15 +645,15 @@ def timevssite_new(fig, infile): # Title # - plt.suptitle('%s [%s]\n%s, %s, %s ' % (longsitestring, location , SDSInfo['dataset_project'], SDSInfo['lab_name'], SDSInfo['lab_country'],), fontsize=fontsize + 5) + plt.suptitle('%s [%s]\n%s, %s, %s ' % (longsitestring, location , SDSInfo['dataset_project'], SDSInfo['lab_1_name'], SDSInfo['lab_1_country'],), fontsize=fontsize + 5) # # Add info to plot # - font0= FontProperties(size=15,style='italic',weight='bold') + font0= FontProperties(size=14,style='italic',weight='bold') txt='CarbonTracker Europe\n $\copyright$ Wageningen University' clr='green' - fig.text(0.82,0.01,txt,ha='left',font_properties = font0, color=clr ) + fig.text(0.8,0.01,txt,ha='left',font_properties = font0, color=clr ) #now = dt.datetime.today() #str1 = 'CTDAS2012\n' + now.strftime('%d/%m/%y') @@ -624,7 +662,7 @@ def timevssite_new(fig, infile): #fig.text(0.12, 0.16, str1, fontsize=0.8 * fontsize, color='0.75') try: - img = urllib2.urlopen(SDSInfo['lab_logo']).read() + img = urllib2.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read() except: logging.warning("No logo found for this program, continuing...") return fig @@ -640,7 +678,9 @@ def timevssite_new(fig, infile): # With newer (1.0) versions of matplotlib, you can # use the "zorder" kwarg to make the image overlay # the plot, rather than hide behind it... (e.g. zorder=10) - ax3 = fig.add_axes([0.7, 0.16, 0.15, 0.15 * height / width]) + if SDSInfo['lab_1_abbr'] in small_logos: scalingf = 2 + else: scalingf = 1 + ax3 = fig.add_axes([0.85-0.15*scalingf, 0.125, 0.15*scalingf, 0.15*scalingf * height / width]) ax3.axis('off') ax3.imshow(im, interpolation='None') @@ -672,8 +712,12 @@ def residuals_new(fig, infile, option): hphtr = f.get_variable('totalmolefractionvariance_forecast') * molefac * molefac flags = f.get_variable('flag_forecast') - longsitestring = f.site_name + ', ' + f.site_country - location = nice_lat(f.site_latitude,'python') + ', ' + nice_lon(f.site_longitude,'python') + ', ' + nice_alt(f.site_elevation) + if 'site_country' in f.ncattrs(): + longsitestring = f.site_name + ', ' + f.site_country + else: longsitestring = f.site_name + if abs(f.site_latitude) > 9999: + location = 'Variable' + else: location = nice_lat(f.site_latitude,'python') + ', ' + nice_lon(f.site_longitude,'python') + ', ' + nice_alt(f.site_elevation) SDSInfo = {} for k in f.ncattrs(): @@ -695,14 +739,11 @@ def residuals_new(fig, infile, option): hphtr = hphtr.compress(sampled) flags = flags.compress(sampled) + assimilated = (flags == 0.0) rejected = (flags == 2.0) notused = (flags == 99.0) residual = simulated - obs - #if notused.all(): - # return fig - #else: - obslabel = 'Residual' sd = pydates[0] ed = pydates[-1] @@ -726,32 +767,42 @@ def residuals_new(fig, infile, option): markersize = 8 fontsize = 16 - - p = ax1.plot(pydates, residual, marker='o', markeredgewidth=1, linestyle='None', markerfacecolor='None', \ - markeredgecolor='k', label=obslabel , markersize=markersize) + # + # Plot observations + # + if assimilated.any(): + p1 = ax1.plot(pydates.compress(assimilated), residual.compress(assimilated), marker='o', markeredgewidth=1, linestyle='None', markerfacecolor='None', markeredgecolor='k', label='Residual (assimilated)' , markersize=markersize,zorder=9) + if notused.any(): + p2 = ax1.plot(pydates.compress(notused), residual.compress(notused), marker='o', markeredgewidth=1, linestyle='None', markerfacecolor='None', 
markeredgecolor='tan', label='Residual (not assimilated)', markersize=markersize,zorder=8) # # Add the model-data mismatch # - q = ax1.fill_between(pydates, mdm, -1.0 * mdm, label='model-data mismatch', color='tan', alpha=0.25, zorder=5) - + mdm_fill = mdm.compress(assimilated).mean() + q = ax1.fill_between(pydates, mdm_fill, -1.0 * mdm_fill, label='model-data mismatch', color='tan', alpha=0.25, zorder=5) # # Add the rejected values if available # if rejected.any(): - r = ax1.plot(pydates.compress(rejected), residual.compress(rejected), marker='s', markeredgewidth=1, markeredgecolor='r', markerfacecolor='red', \ - linestyle='None', label='Model Rejected (N=%d)' % len(pydates.compress(rejected)), markersize=markersize) + r = ax1.plot(pydates.compress(rejected), residual.compress(rejected), marker='s', markeredgewidth=1, markeredgecolor='red', markerfacecolor='red', linestyle='None', label='Model Rejected (N=%d)' % len(pydates.compress(rejected)), markersize=markersize,zorder=10) # # Axes 2 # if option == 'final': - residual = simulated.compress(flags != 2) - obs.compress(flags != 2) - pydates = pydates.compress(flags != 2) - mdm = mdm.compress(flags != 2) + if mdm.mean() < 900: + residual = simulated.compress(flags == 0) - obs.compress(flags == 0) + pydates = pydates.compress(flags == 0) + mdm = mdm.compress(flags == 0) + else: + residual = simulated.compress(flags != 2) - obs.compress(flags != 2) + pydates = pydates.compress(flags != 2) + mdm = mdm.compress(flags != 2) chisquared = (residual ** 2) / mdm elif option == 'forecast': chisquared = (residual ** 2) / hphtr offset = 0.0 + + if SDSInfo['site_code'] == 'POC': residual = residual.compress(simulated > -9000) n, bins, patches = ax2.hist(residual, max(residual.shape[0] / 15, 15), normed=1, orientation='horizontal') p = plt.setp(patches, 'facecolor', 'tan' , 'edgecolor', 'tan', label='None', alpha=0.25) @@ -783,7 +834,7 @@ def residuals_new(fig, infile, option): ax2.text(0.6, 0.01 + offset, labs[0], transform=ax2.transAxes, fontsize=1.1 * fontsize, horizontalalignment='center', color='k') offset += -0.05 - ax2.set_ylim(-4 * sc, 4 * sc) + ax2.set_ylim(-6 * sc, 6 * sc) ax2.spines['left'].set_position(('axes', 0.0)) ax2.spines['right'].set_color('none') @@ -828,6 +879,7 @@ def residuals_new(fig, infile, option): ax1.grid(True, ls='-', color='0.75', axis='y') ax1.set_xlim(pltdt.date2num(dt.datetime(sd.year, 1, 1)), pltdt.date2num(dt.datetime(ed.year + 1, 1, 1))) + ax1.set_ylim(-6 * sc, 6 * sc) ym = ax1.get_ylim() ymin=ym[0] ; ymax =ym[1] for yr in range(sd.year,ed.year+1,2): @@ -836,7 +888,6 @@ def residuals_new(fig, infile, option): ax1.fill([x1,x2,x2,x1],[ymin,ymin,ymax,ymax],color='0.9',zorder=1) #ax1.set_ylim(ymin,ymax) - ax1.set_ylim(-4 * sc, 4 * sc) # # # Set Tick Font Size @@ -853,15 +904,15 @@ def residuals_new(fig, infile, option): # Title # - plt.suptitle('%s [%s]\n%s, %s, %s ' % (longsitestring, location , SDSInfo['dataset_project'], SDSInfo['lab_name'], SDSInfo['lab_country'],), fontsize=fontsize + 5) + plt.suptitle('%s [%s]\n%s, %s, %s ' % (longsitestring, location , SDSInfo['dataset_project'], SDSInfo['lab_1_name'], SDSInfo['lab_1_country'],), fontsize=fontsize + 5) # # Add info to plot # - font0= FontProperties(size=15,style='italic',weight='bold') + font0= FontProperties(size=14,style='italic',weight='bold') txt='CarbonTracker Europe\n $\copyright$ Wageningen University' clr='green' - fig.text(0.82,0.01,txt,ha='left',font_properties = font0, color=clr ) + fig.text(0.8,0.01,txt,ha='left',font_properties = font0, color=clr ) 
#now = dt.datetime.today() #str1 = 'CTDAS2012\n' + now.strftime('%d/%m/%y') @@ -870,7 +921,7 @@ def residuals_new(fig, infile, option): #fig.text(0.12, 0.16, str1, fontsize=0.8 * fontsize, color='0.75') try: - img = urllib2.urlopen(SDSInfo['lab_logo']).read() + img = urllib2.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read() except: logging.warning("No logo found for this program, continuing...") return fig @@ -886,7 +937,9 @@ def residuals_new(fig, infile, option): # With newer (1.0) versions of matplotlib, you can # use the "zorder" kwarg to make the image overlay # the plot, rather than hide behind it... (e.g. zorder=10) - ax3 = fig.add_axes([0.7, 0.16, 0.15, 0.15 * height / width]) + if SDSInfo['lab_1_abbr'] in small_logos: scalingf = 2 + else: scalingf = 1 + ax3 = fig.add_axes([0.85-0.15*scalingf, 0.125, 0.15*scalingf, 0.15*scalingf * height / width]) ax3.axis('off') ax3.imshow(im, interpolation='None') @@ -901,7 +954,7 @@ if __name__ == '__main__': # started as script logging.root.setLevel(logging.DEBUG) - analysisdir = "/Storage/CO2/ingrid/carbontracker/geocarbon-ei-sibcasa-gfed4-zoom-gridded-combined/analysis/" + analysisdir = "/Users/ingrid/mnt/promise/CO2/ingrid/carbontracker/cartesius/gcp2-combined/analysis/" site_timeseries(analysisdir,option='final') sys.exit(0) diff --git a/da/analysis/summarize_obs.py b/da/analysis/summarize_obs.py index 6359d4c..ad92730 100755 --- a/da/analysis/summarize_obs.py +++ b/da/analysis/summarize_obs.py @@ -96,7 +96,8 @@ def summarize_obs(analysisdir, printfmt='html'): <TH> Lab. </TH> \ <TH> Country </TH> \ <TH> Lat, Lon, Elev. (m ASL) </TH> \ - <TH> No. Obs. Avail. </TH> \ + <TH> No. Obs. Available </TH> \ + <TH> No. Obs. Assimilated </TH> \ <TH> √R (μmol mol<sup>-1</sup>) </TH> \ <TH> √HPH (μmol mol<sup>-1</sup>) </TH> \ <TH> H(x)-y (μmol mol<sup>-1</sup>) </TH> \ @@ -113,6 +114,7 @@ def summarize_obs(analysisdir, printfmt='html'): <TD>%40s</TD>\ <TD>%s</TD>\ <TD>%d</TD>\ + <TD>%d</TD>\ <TD>%+5.2f</TD>\ <TD>%+5.2f</TD>\ <TD>%+5.2f±%5.2f</TD>\ @@ -128,6 +130,8 @@ def summarize_obs(analysisdir, printfmt='html'): table = [] for infile in infiles: + #if not 'mlo_surface-insitu' in infile: continue + #if not 'poc' in infile: continue logging.debug( infile ) f = io.CT_CDF(infile, 'read') date = f.get_variable('time') @@ -138,16 +142,46 @@ def summarize_obs(analysisdir, printfmt='html'): simulated_std = f.get_variable('modelsamplesstandarddeviation_forecast') * 1e6 hphtr = f.get_variable('totalmolefractionvariance_forecast') * 1e6 * 1e6 flag = f.get_variable('flag_forecast') - - pydates = [dt.datetime(1970, 1, 1) + dt.timedelta(seconds=int(d)) for d in date] - - pydates = np.array(pydates).compress(flag != 2) - simulated_fc = simulated_fc.compress(flag != 2) - simulated = simulated.compress(flag != 2) - obs = obs.compress(flag != 2) - mdm = mdm.compress(flag != 2) - hphtr = hphtr.compress(flag != 2) + obs_avail = len(np.ma.compressed(mdm)) + + pydates = np.array([dt.datetime(1970, 1, 1) + dt.timedelta(seconds=int(d)) for d in date]) + + sampled = (np.ma.getmaskarray(simulated) == False) + + pydates = pydates.compress(sampled) + simulated = simulated.compress(sampled) + simulated_fc = simulated_fc.compress(sampled) + obs = obs.compress(sampled) + mdm = mdm.compress(sampled) + hphtr = hphtr.compress(sampled) + flag = flag.compress(sampled) + if f.site_code.upper() == 'POC': + pydates = pydates.compress(simulated > -9000) + simulated_fc = simulated_fc.compress(simulated > -9000) + obs = 
obs.compress(simulated > -9000) + mdm = mdm.compress(simulated > -9000) + hphtr = hphtr.compress(simulated > -9000) + flag = flag.compress(simulated > -9000) + simulated = simulated.compress(simulated > -9000) + + if mdm.mean() > 900: + pydates = pydates.compress(flag != 2) + simulated_fc = simulated_fc.compress(flag != 2) + simulated = simulated.compress(flag != 2) + obs = obs.compress(flag != 2) + mdm = mdm.compress(flag != 2) + hphtr = hphtr.compress(flag != 2) + obs_assim = 0 + else: + pydates = pydates.compress(flag == 0) + simulated_fc = simulated_fc.compress(flag == 0) + simulated = simulated.compress(flag == 0) + obs = obs.compress(flag == 0) + mdm = mdm.compress(flag == 0) + hphtr = hphtr.compress(flag == 0) + obs_assim = len(np.ma.compressed(mdm)) + summer = [i for i, d in enumerate(pydates) if d.month in [6, 7, 8, 9] ] winter = [i for i, d in enumerate(pydates) if d.month in [11, 12, 1, 2, 3, 4] ] @@ -157,27 +191,40 @@ def summarize_obs(analysisdir, printfmt='html'): diffstd = ((simulated - obs).std()) diffsummerstd = ((simulated - obs).take(summer).std()) diffwinterstd = ((simulated - obs).take(winter).std()) - chi_sq = ((simulated_fc - obs)**2/hphtr).mean() - #chi_sq = ((simulated - obs)**2/mdm).mean() + #chi_summer = ((simulated - obs)**2/mdm).take(summer).mean() + #chi_winter = ((simulated - obs)**2/mdm).take(winter).mean() + #n_summer = simulated.take(summer).shape[0] + #n_winter = simulated.take(winter).shape[0] + #print 'summer: %0.2f, %0.2f, %0.2f, %i'%(diffsummer,diffsummerstd,chi_summer,n_summer) + #print 'winter: %0.2f, %0.2f, %0.2f, %i'%(diffwinter,diffwinterstd,chi_winter,n_winter) + chi_sq = -99 + if mdm.mean() < 900: + chi_sq = ((simulated_fc - obs)**2/hphtr).mean() + #chi_sq = ((simulated - obs)**2/mdm).mean() if mdm.mean() > 900: chi_clr = '#EEEEEE' - chi_sq = -99 elif chi_sq > 1.2: chi_clr = '#ff0000' elif chi_sq < 0.5: chi_clr = '#ff7f00' else: chi_clr = '#00cc00' - location = nice_lat(f.site_latitude,'html') + ', ' + nice_lon(f.site_longitude,'html') + ', ' + nice_alt(f.site_elevation) + if abs(f.site_latitude) > 9999: + location = 'Variable' + else:location = nice_lat(f.site_latitude,'html') + ', ' + nice_lon(f.site_longitude,'html') + ', ' + nice_alt(f.site_elevation) + if 'site_country' in f.ncattrs(): + country = f.site_country + else: country = 'Multiple' if printfmt == 'html': ss = (f.dataset_name[4:], f.site_code.upper(), f.dataset_project, - f.lab_abbr, - f.site_country, + f.lab_1_abbr, + country, location, - len(np.ma.compressed(mdm)), + obs_avail, + obs_assim, mdm.mean(), np.sqrt((simulated_std ** 2).mean()), diff, diffstd, @@ -279,16 +326,16 @@ def make_map(analysisdir): #makes a map of amount of assimilated observations pe #for lon,lat,name,n in zip(lons,lats,names,nobs): count = 0 for i in range(len(lats)): - if nobs[i] < 100: + if nobs[i] < 250: n = 0 c = 'blue' - elif nobs[i] < 200: + elif nobs[i] < 500: n = 1 c = 'green' - elif nobs[i] < 300: + elif nobs[i] < 750: n = 2 c = 'orange' - elif nobs[i] < 500: + elif nobs[i] < 1000: n = 3 c = 'brown' else: @@ -296,19 +343,24 @@ def make_map(analysisdir): #makes a map of amount of assimilated observations pe c = 'red' if lons[i] > -900: x,y = m(lons[i],lats[i]) - ax.plot(x,y,'o',color=c,markersize=8+1*n)#,markeredgecolor='k',markeredgewidth=2) + ax.plot(x,y,'o',color=c,markersize=12+1.5*n)#,markeredgecolor='k',markeredgewidth=2) #ax.annotate(labs[i],xy=m(lons[i],lats[i]),xycoords='data',fontweight='bold') else: x,y = m(169,87-count) - ax.plot(x,y,'o',color=c,markersize=8+1*n) + 
ax.plot(x,y,'o',color=c,markersize=12+1.5*n) ax.annotate(labs[i],xy=m(172,86-count),xycoords='data',fontweight='bold') count = count + 4 - fig.text(0.08,0.95,u'\u2022: N<100',fontsize=14,color='blue') - fig.text(0.16,0.95,u'\u2022: N<200',fontsize=15,color='green') - fig.text(0.24,0.95,u'\u2022: N<300',fontsize=16,color='orange') - fig.text(0.32,0.95,u'\u2022: N<500',fontsize=17,color='brown') - fig.text(0.40,0.95,u'\u2022: N>500',fontsize=18,color='red') + fig.text(0.15,0.945,u'\u2022',fontsize=35,color='blue') + fig.text(0.16,0.95,': N<250',fontsize=24,color='blue') + fig.text(0.30,0.94,u'\u2022',fontsize=40,color='green') + fig.text(0.31,0.95,': N<500',fontsize=24,color='green') + fig.text(0.45,0.94,u'\u2022',fontsize=45,color='orange') + fig.text(0.46,0.95,': N<750',fontsize=24,color='orange') + fig.text(0.60,0.939,u'\u2022',fontsize=50,color='brown') + fig.text(0.61,0.95,': N<1000',fontsize=24,color='brown') + fig.text(0.75,0.938,u'\u2022',fontsize=55,color='red') + fig.text(0.765,0.95,': N>1000',fontsize=24,color='red') ax.set_title('Assimilated observations',fontsize=24) font0= FontProperties(size=15,style='italic',weight='bold') @@ -471,7 +523,7 @@ if __name__ == '__main__': # started as script sys.path.append('../../') logging.root.setLevel(logging.DEBUG) - analysisdir = "/Storage/CO2/ingrid/carbontracker/geocarbon-ei-sibcasa-gfed4-zoom-gridded-combined/analysis/" + analysisdir = "/Users/ingrid/mnt/promise/CO2/ingrid/carbontracker/cartesius/gcp2-combined/analysis/" summarize_obs(analysisdir) #make_map(analysisdir) diff --git a/da/tools/initexit.py b/da/tools/initexit.py index 60a8141..48c1c64 100755 --- a/da/tools/initexit.py +++ b/da/tools/initexit.py @@ -553,8 +553,10 @@ class CycleControl(dict): nextjobid = '%s'% ( (self['time.end']+cycle*self['cyclelength']).strftime('%Y%m%d'),) nextrestartfilename = self['da.restart.fname'].replace(jobid,nextjobid) nextlogfilename = logfile.replace(jobid,nextjobid) - template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,) - #template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s &\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,) + if self.daplatform.ID == 'WU capegrim': + template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s &\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,) + else: + template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,) # write and submit self.daplatform.write_job(jobfile, template, jobid) diff --git a/da/tools/pipeline.py b/da/tools/pipeline.py index 7476d1f..6ef491d 100755 --- a/da/tools/pipeline.py +++ b/da/tools/pipeline.py @@ -72,7 +72,8 @@ def forward_pipeline(dacycle, platform, dasystem, samples, statevector, obsopera # This loads the results from another assimilation experiment into the current statevector if sam: - filename = os.path.join(fwddir, dacycle['time.start'].strftime('%Y%m%d'), 'savestate.nc') + filename = os.path.join(fwddir, dacycle['time.start'].strftime('%Y%m%d'), 'savestate_%s.nc'%dacycle['time.start'].strftime('%Y%m%d')) + #filename = os.path.join(fwddir, dacycle['time.start'].strftime('%Y%m%d'), 'savestate.nc') statevector.read_from_file_exceptsam(filename, 'prior') elif not legacy: filename = os.path.join(fwddir, dacycle['time.start'].strftime('%Y%m%d'), 'savestate_%s.nc'%dacycle['time.start'].strftime('%Y%m%d')) @@ 
-408,7 +409,7 @@ def advance(dacycle, samples, statevector, obsoperator): logging.debug("Appended ObsOperator restart and output file lists to dacycle for collection ") dacycle.output_filelist.append(dacycle['ObsOperator.inputfile']) - logging.debug("Appended Observation filename to dacycle for collection ") + logging.debug("Appended Observation filename to dacycle for collection: %s"%(dacycle['ObsOperator.inputfile'])) sampling_coords_file = os.path.join(dacycle['dir.input'], 'sample_coordinates_%s.nc' % dacycle['time.sample.stamp']) if os.path.exists(sampling_coords_file): -- GitLab
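The new "amazon" branch added to save_weekly_avg_agg_data() in expand_fluxes.py reads the bundled da/analysis/amazon_mask.nc, takes its integer 'regionmask' variable, and creates an output dimension of length regionmask.max(), so that weekly fluxes can be aggregated per Amazon region alongside the existing olson, transcom and country aggregates. The sketch below illustrates how such a mask can be applied to a gridded flux field. It is illustrative only and not part of the patch: the 1x1 degree (180x360) grid, the cosine-latitude cell areas, and the helper name aggregate_to_amazon_regions() are assumptions made for this example; expand_fluxes.py itself imports globarea() for grid-cell areas and performs the actual aggregation inside save_weekly_avg_agg_data().

import numpy as np
import netCDF4 as cdf

def aggregate_to_amazon_regions(flux, maskfile='da/analysis/amazon_mask.nc'):
    # Sum a gridded flux field (mol m-2 s-1) over each Amazon region.
    # Assumes a global 1x1 degree grid (180x360) and that 'regionmask' holds
    # integer region codes, with 0 meaning "outside all Amazon regions".
    regfile = cdf.Dataset(maskfile)
    regionmask = np.asarray(regfile.variables['regionmask'][:])  # same variable name as in the patch
    regfile.close()

    nregions = int(regionmask.max())   # mirrors ncf.add_dim(dimname, regionmask.max())

    # Approximate spherical grid-cell areas in m2 (CTDAS itself provides globarea() for this).
    dlat = dlon = np.deg2rad(1.0)
    lats = np.deg2rad(np.arange(-89.5, 90.5, 1.0))
    area = (6.371e6 ** 2) * dlat * dlon * np.cos(lats)[:, None] * np.ones((180, 360))

    totals = np.zeros(nregions)
    for r in range(1, nregions + 1):
        totals[r - 1] = (flux * area)[regionmask == r].sum()   # mol s-1 per region
    return totals

A call like aggregate_to_amazon_regions(weekly_flux_field) would then yield one total per mask region; any unit conversion would follow whatever convention the surrounding analysis code applies to the other region aggregates.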