Commit 1e2ab2c1 authored by Ingrid Luijkx's avatar Ingrid Luijkx
Browse files

Several adjustments in analysis scripts to produce plots, maps, tables etc. as on the new website

parent 4e690ce5
......@@ -735,18 +735,38 @@ def save_weekly_avg_agg_data(dacycle, region_aggregate='olson'):
for j in range(19):
regionnames.append("%s_%s" % (tc.transnams[i], tc.olsonnams[j],))
regionnames.extend(tc.oifnams)
xform = False
for i, name in enumerate(regionnames):
lab = 'Aggregate_Region_%03d' % (i + 1,)
setattr(ncf, lab, name)
elif region_aggregate == "olson_extended":
regionmask = tc.olson_ext_mask
dimname = 'olson_ext'
dimregs = ncf.add_dim(dimname, regionmask.max())
xform = False
for i, name in enumerate(tc.olsonextnams):
lab = 'Aggreate_Region_%03d'%(i+1)
setattr(ncf, lab, name)
elif region_aggregate == "transcom":
regionmask = tc.transcommask
dimname = 'tc'
dimregs = ncf.add_region_dim(type='tc')
xform = False
elif region_aggregate == "transcom_extended":
regionmask = tc.transcommask
dimname = 'tc_ext'
dimregs = ncf.add_region_dim(type='tc_ext')
xform = True
elif region_aggregate == "country":
xform = False
countrydict = ct.get_countrydict()
selected = ['Russia', 'Canada', 'China', 'United States', 'EU27', 'Brazil', 'Australia', 'India'] #,'G8','UNFCCC_annex1','UNFCCC_annex2']
regionmask = np.zeros((180, 360,), 'float')
......@@ -857,10 +877,28 @@ def save_weekly_avg_agg_data(dacycle, region_aggregate='olson'):
regiondata.append(aggdata)
regiondata = np.array(regiondata)
try:
regioncov = regiondata.transpose().dot(regiondata) / (data.shape[0] - 1)
except:
regioncov = np.dot(regiondata.transpose(), regiondata) / (data.shape[0] - 1) # Huygens fix
if xform:
regiondata = ExtendedTCRegions(regiondata,cov=False)
regioncov = ExtendedTCRegions(regioncov,cov=True)
savedict = ncf.standard_var(varname=vname)
savedict['name'] = vname.replace('ensemble','covariance')
savedict['units'] = '[mol/region/s]^2'
savedict['dims'] = dimdate + dimregs + dimregs
savedict['count'] = index
savedict['values'] = regioncov
ncf.add_data(savedict)
savedict = ncf.standard_var(varname=vname)
savedict['name'] = vname
savedict['units'] = 'mol/region/s'
savedict['dims'] = dimdate + dimensemble + dimregs
elif 'flux' in vname:
......@@ -868,6 +906,9 @@ def save_weekly_avg_agg_data(dacycle, region_aggregate='olson'):
regiondata = state_to_grid(data * area, regionmask, reverse=True, mapname=region_aggregate)
if xform:
regiondata = ExtendedTCRegions(regiondata)
savedict = ncf.standard_var(varname=vname)
savedict['dims'] = dimdate + dimregs
savedict['units'] = 'mol/region/s'
......@@ -876,6 +917,8 @@ def save_weekly_avg_agg_data(dacycle, region_aggregate='olson'):
data = ncf_in.get_variable(vname)[:]
regiondata = state_to_grid(data, regionmask, reverse=True, mapname=region_aggregate)
if xform:
regiondata = ExtendedTCRegions(regiondata)
savedict = ncf.standard_var(varname=vname)
savedict['dims'] = dimdate + dimregs
......@@ -1040,7 +1083,9 @@ if __name__ == "__main__":
save_weekly_avg_tc_data(dacycle, statevector)
save_weekly_avg_ext_tc_data(dacycle)
save_weekly_avg_agg_data(dacycle, region_aggregate='olson')
save_weekly_avg_agg_data(dacycle, region_aggregate='olson_extended')
save_weekly_avg_agg_data(dacycle, region_aggregate='transcom')
save_weekly_avg_agg_data(dacycle, region_aggregate='transcom_extended')
save_weekly_avg_agg_data(dacycle, region_aggregate='country')
dacycle.advance_cycle_times()
......
No preview for this file type
This diff is collapsed.
......@@ -11,7 +11,7 @@ import da.tools.io4 as io
fontsize = 10
def nice_lat(cls):
def nice_lat(cls,format='html'):
#
# Convert latitude from decimal to cardinal
#
......@@ -22,9 +22,13 @@ def nice_lat(cls):
dec, deg = np.math.modf(cls)
return string.strip('%2d %2d\'%s' % (abs(deg), round(abs(60 * dec), 0), h))
#return string.strip('%2d %2d\'%s' % (abs(deg), round(abs(60 * dec), 0), h))
if format == 'python':
return string.strip('%3d$^\circ$%2d\'%s' % (abs(deg), round(abs(60 * dec), 0), h))
if format == 'html':
return string.strip('%3d&deg%2d\'%s' % (abs(deg), round(abs(60 * dec), 0), h))
def nice_lon(cls):
def nice_lon(cls,format='html'):
#
# Convert longitude from decimal to cardinal
#
......@@ -35,16 +39,21 @@ def nice_lon(cls):
dec, deg = np.math.modf(cls)
return string.strip('%3d %2d\'%s' % (abs(deg), round(abs(60 * dec), 0), h))
#return string.strip('%3d %2d\'%s' % (abs(deg), round(abs(60 * dec), 0), h))
if format == 'python':
return string.strip('%3d$^\circ$%2d\'%s' % (abs(deg), round(abs(60 * dec), 0), h))
if format == 'html':
return string.strip('%3d&deg%2d\'%s' % (abs(deg), round(abs(60 * dec), 0), h))
def nice_alt(cls):
    #
    # Reformat elevation or altitude as a whole number of metres above sea level.
    # The previous one-decimal, rounded-to-10m format is kept below as a comment.
    # NOTE: uses the Python 2 `string.strip` function, consistent with the rest
    # of this module.
    #
    #return string.strip('%10.1f masl' % round(cls, -1))
    return string.strip('%i masl' %cls)
def summarize_obs(analysisdir, printfmt='html'):
def summarize_obs(dacycle, printfmt='html'):
"""***************************************************************************************
Call example:
......@@ -93,7 +102,7 @@ def summarize_obs(analysisdir, printfmt='html'):
<TH> H(x)-y (&mu;mol mol<sup>-1</sup>) </TH> \
<TH> H(x)-y (JJAS) (&mu;mol mol<sup>-1</sup>) </TH> \
<TH> H(x)-y (NDJFMA) (&mu;mol mol<sup>-1</sup>) </TH> \
<TH> Chi_sq </TH> \
<TH> Inn. &#935;<sup>2</sup></TH> \
<TH> Site code </TH>\n \
</TR>\n"
......@@ -119,29 +128,37 @@ def summarize_obs(analysisdir, printfmt='html'):
table = []
for infile in infiles:
#logging.debug( infile )
logging.debug( infile )
f = io.CT_CDF(infile, 'read')
date = f.get_variable('time')
obs = f.get_variable('value') * 1e6
mdm = f.get_variable('modeldatamismatch') * 1e6
simulated = f.get_variable('modelsamplesmean_forecast') * 1e6
simulated_fc = f.get_variable('modelsamplesmean_forecast') * 1e6
simulated = f.get_variable('modelsamplesmean') * 1e6
simulated_std = f.get_variable('modelsamplesstandarddeviation_forecast') * 1e6
hphtr = f.get_variable('totalmolefractionvariance_forecast') * 1e6 * 1e6
flag = f.get_variable('flag_forecast')
pydates = [dt.datetime(1970, 1, 1) + dt.timedelta(seconds=int(d)) for d in date]
pydates = np.array(pydates).compress(flag != 2)
simulated_fc = simulated_fc.compress(flag != 2)
simulated = simulated.compress(flag != 2)
obs = obs.compress(flag != 2)
mdm = mdm.compress(flag != 2)
hphtr = hphtr.compress(flag != 2)
summer = [i for i, d in enumerate(pydates) if d.month in [6, 7, 8, 9] ]
winter = [i for i, d in enumerate(pydates) if d.month in [11, 12, 1, 2, 3, 4] ]
diff = ((simulated - obs).mean())
diffsummer = ((simulated - obs).take(summer).mean())
diffwinter = ((simulated - obs).take(winter).mean())
diffstd = ((simulated - obs).std())
diffsummerstd = ((simulated - obs).take(summer).std())
diffwinterstd = ((simulated - obs).take(winter).std())
chi_sq = ((simulated.compress(flag == 0) - obs.compress(flag == 0))**2/hphtr.compress(flag == 0)).mean()
chi_sq = ((simulated_fc - obs)**2/hphtr).mean()
#chi_sq = ((simulated - obs)**2/mdm).mean()
if mdm.mean() > 900:
chi_clr = '#EEEEEE'
chi_sq = -99
......@@ -151,10 +168,10 @@ def summarize_obs(analysisdir, printfmt='html'):
chi_clr = '#ff7f00'
else: chi_clr = '#00cc00'
location = nice_lat(f.site_latitude) + ', ' + nice_lon(f.site_longitude) + ', ' + nice_alt(f.site_elevation)
location = nice_lat(f.site_latitude,'html') + ', ' + nice_lon(f.site_longitude,'html') + ', ' + nice_alt(f.site_elevation)
if printfmt == 'html':
ss = (f.site_code.upper(),
ss = (f.dataset_name[4:],
f.site_code.upper(),
f.dataset_project,
f.lab_abbr,
......@@ -202,6 +219,108 @@ def summarize_obs(analysisdir, printfmt='html'):
logging.info("File written with summary: %s" % saveas)
def make_map(analysisdir): #makes a map of amount of assimilated observations per site
    """
    Plot a network map of observation sites, colour- and size-coded by the
    number of assimilated samples per site, and save it as PNG files
    ('networkmap.png' at 200 dpi and 'networkmap.large.png' at 300 dpi) in
    the 'summary' subdirectory of *analysisdir*.

    Reads every '*.nc' file in <analysisdir>/data_molefractions. Only sites
    whose maximum model-data mismatch is below 0.001 are included
    (presumably the assimilated, low-mismatch sites -- TODO confirm units).
    Returns None; if the input directory is missing it logs an error and
    returns early.
    """
    import netCDF4 as cdf
    import matplotlib.pyplot as plt
    import matplotlib
    # NOTE(review): star import -- presumably supplies select_map() and the
    # pylab names arange() and close() used below; verify against maptools.
    from maptools import *
    from matplotlib.font_manager import FontProperties

    # Output directory for the map images; created on first use.
    sumdir = os.path.join(analysisdir, 'summary')
    if not os.path.exists(sumdir):
        logging.info("Creating new directory " + sumdir)
        os.makedirs(sumdir)

    mrdir = os.path.join(analysisdir, 'data_molefractions')
    if not os.path.exists(mrdir):
        logging.error("Input directory does not exist (%s), exiting... " % mrdir)
        return None

    mrfiles = os.listdir(mrdir)
    infiles = [os.path.join(mrdir, f) for f in mrfiles if f.endswith('.nc')]

    # Per-site coordinates, labels and number of assimilated observations.
    lats=[]
    lons=[]
    labs=[]
    nobs=[]
    for files in infiles:
        f=cdf.Dataset(files)
        if f.variables['modeldatamismatch'][:].max() < 0.001:
            sim = f.variables['modelsamplesmean'][:]
            flag = f.variables['flag_forecast'][:]
            # flag == 2 marks rejected samples; masked entries were never sampled.
            sim = sim.compress(flag != 2)
            sampled = (np.ma.getmaskarray(sim) == False)
            sim = sim.compress(sampled)
            lats.append(f.site_latitude)
            lons.append(f.site_longitude)
            labs.append(f.site_code)
            nobs.append(len(sim))
        f.close()

    lats = np.array(lats)
    lons = np.array(lons)
    labs = np.array(labs)
    nobs = np.array(nobs)

    saveas = os.path.join(sumdir, 'networkmap')
    logging.info("Making map: %s" % saveas)

    fig = plt.figure(1,figsize=(20,12))
    ax = fig.add_axes([0.05,0.1,0.9,0.8])
    #m,nx,ny = select_map('Global Cylinder')
    m,nx,ny = select_map('Europe Conformal')
    m.drawcountries()
    m.drawcoastlines()
    parallels = arange(-90.,91,30.)
    m.drawparallels(parallels,color='grey',linewidth=0.5,dashes=[1,0.001],labels=[1,0,0,1],fontsize=16)
    meridians = arange(-180.,181.,60.)
    m.drawmeridians(meridians,color='grey',linewidth=0.5,dashes=[1,0.001],labels=[1,0,0,1],fontsize=16)

    #for lon,lat,name,n in zip(lons,lats,names,nobs):
    # count offsets sites without valid coordinates down a side column.
    count = 0
    for i in range(len(lats)):
        # Bin the observation count into a colour and a marker-size increment.
        if nobs[i] < 100:
            n = 0
            c = 'blue'
        elif nobs[i] < 200:
            n = 1
            c = 'green'
        elif nobs[i] < 300:
            n = 2
            c = 'orange'
        elif nobs[i] < 500:
            n = 3
            c = 'brown'
        else:
            n = 4
            c = 'red'
        # Longitudes <= -900 appear to be a missing-value sentinel; such sites
        # are stacked, labelled, in a column near (169E, 87N) -- TODO confirm.
        if lons[i] > -900:
            x,y = m(lons[i],lats[i])
            ax.plot(x,y,'o',color=c,markersize=8+1*n)#,markeredgecolor='k',markeredgewidth=2)
            #ax.annotate(labs[i],xy=m(lons[i],lats[i]),xycoords='data',fontweight='bold')
        else:
            x,y = m(169,87-count)
            ax.plot(x,y,'o',color=c,markersize=8+1*n)
            ax.annotate(labs[i],xy=m(172,86-count),xycoords='data',fontweight='bold')
            count = count + 4

    # Legend: coloured bullets with the count bins used above.
    fig.text(0.08,0.95,u'\u2022: N<100',fontsize=14,color='blue')
    fig.text(0.16,0.95,u'\u2022: N<200',fontsize=15,color='green')
    fig.text(0.24,0.95,u'\u2022: N<300',fontsize=16,color='orange')
    fig.text(0.32,0.95,u'\u2022: N<500',fontsize=17,color='brown')
    fig.text(0.40,0.95,u'\u2022: N>500',fontsize=18,color='red')
    ax.set_title('Assimilated observations',fontsize=24)

    font0= FontProperties(size=15,style='italic',weight='bold')
    txt='CarbonTracker Europe\n $\copyright$ Wageningen University'
    clr='green'
    fig.text(0.82,0.01,txt,ha='left',font_properties = font0, color=clr )

    saveas=os.path.join(sumdir,'networkmap.png')
    fig.savefig(saveas,dpi=200)
    saveas=os.path.join(sumdir,'networkmap.large.png')
    fig.savefig(saveas,dpi=300)
    # NOTE(review): close() is not defined in this function's explicit imports;
    # presumably from the maptools/pylab star import (plt.close would be safer).
    close(fig)
def summarize_stats(dacycle):
"""
Summarize the statistics of the observations for this cycle
......@@ -352,10 +471,10 @@ if __name__ == '__main__': # started as script
sys.path.append('../../')
logging.root.setLevel(logging.DEBUG)
analysisdir = "/Storage/CO2/ingrid/carbontracker/zeus/ctdas_nobcb_zoom_ecoregions_2000_2010_GEOCARBON/analysis/"
analysisdir = "/Storage/CO2/ingrid/carbontracker/geocarbon-ei-sibcasa-gfed4-zoom-gridded-combined/analysis/"
summarize_obs(analysisdir)
#make_map(analysisdir)
sys.exit(0)
......
......@@ -20,7 +20,7 @@ import subprocess
def time_avg(dacycle,avg='transcom'):
""" Function to create a set of averaged files in a folder, needed to make longer term means """
if avg not in ['transcom','olson','country','flux1x1']:
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
......@@ -36,7 +36,7 @@ def time_avg(dacycle,avg='transcom'):
if new_year(dacycle):
yearly_avg(dacycle,avg)
longterm_avg(dacycle,avg)
longterm_avg(dacycle,avg)
def new_month(dacycle):
""" check whether we just entered a new month"""
......@@ -57,7 +57,7 @@ def new_year(dacycle):
def daily_avg(dacycle,avg):
""" Function to create a set of daily files in a folder, needed to make longer term means """
if avg not in ['transcom','olson','country','flux1x1']:
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
......@@ -89,7 +89,7 @@ def daily_avg(dacycle,avg):
def monthly_avg(dacycle,avg):
""" Function to average a set of files in a folder from daily to monthly means """
if avg not in ['transcom','olson','country','flux1x1']:
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
......@@ -135,7 +135,7 @@ def monthly_avg(dacycle,avg):
def yearly_avg(dacycle,avg):
""" Function to average a set of files in a folder from monthly to yearly means """
if avg not in ['transcom','olson','country','flux1x1']:
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
......@@ -178,7 +178,7 @@ def yearly_avg(dacycle,avg):
def longterm_avg(dacycle,avg):
""" Function to average a set of files in a folder from monthly to yearly means """
if avg not in ['transcom','olson','country','flux1x1']:
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
......@@ -214,9 +214,12 @@ if __name__ == "__main__":
dacycle.setup()
dacycle.parse_times()
time_avg(dacycle,avg='flux1x1')
time_avg(dacycle,avg='transcom')
time_avg(dacycle,avg='olson')
time_avg(dacycle,avg='country')
while dacycle['time.end'] < dacycle['time.finish']:
time_avg(dacycle,avg='flux1x1')
time_avg(dacycle,avg='transcom')
time_avg(dacycle,avg='transcom_extended')
time_avg(dacycle,avg='olson')
time_avg(dacycle,avg='olson_extended')
time_avg(dacycle,avg='country')
dacycle.advance_cycle_times()
......@@ -20,6 +20,11 @@ olsonmask = cdf_temp.get_variable('land_ecosystems')
oifmask = cdf_temp.get_variable('ocean_regions')
dummy = cdf_temp.close()
# Extended-Olson region mask: a gridded field of region indices read from the
# 'regions' variable of the olson_extended.nc aggregation file in analysisdir.
matrix_file = os.path.join(analysisdir, 'olson_extended.nc')
cdf_temp = io.CT_CDF(matrix_file, 'read')
olson_ext_mask = cdf_temp.get_variable('regions')
dummy = cdf_temp.close()
# Names and short names of TransCom regions
transshort = []
......@@ -50,6 +55,16 @@ for line in temp:
olsonnams.append(name.strip('"'))
olsonshort.append(abbr)
# Names of the extended Olson regions, collected from the global attributes of
# olson_extended.nc whose attribute name contains 'Region' (sorted so the list
# order follows the numbered Region_### attribute names).
olsonextnams = []
matrix_file = os.path.join(analysisdir, 'olson_extended.nc')
cdf_temp = io.CT_CDF(matrix_file, 'read')
keys = cdf_temp.ncattrs()
keys.sort()
for k in keys:
    if 'Region' in k:
        olsonextnams.append(getattr(cdf_temp, k))
cdf_temp.close()
ext_transnams = []
ext_transshort = []
ext_transcomps = []
......@@ -323,5 +338,5 @@ if __name__ == '__main__':
print olsonnams
print olsonshort
print ext_transcomps
print olsonextnams
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment