Commit 85fb7df0 authored by Ingrid Luijkx

Updates in analysis for producing graphs etc. for CTE2015 website, plus some minor changes for Eric's SAM runs.
parent bb5bb773
#!/usr/bin/env python
# expand_fluxes.py
import sys
sys.path.append('../../')
import os
rootdir = os.getcwd().split('da/')[0]
analysisdir = os.path.join(rootdir, 'da/analysis')
from datetime import datetime, timedelta
import logging
......@@ -13,7 +15,7 @@ from da.analysis.tools_regions import globarea, state_to_grid
from da.tools.general import create_dirs
from da.analysis.tools_country import countryinfo # needed here
from da.analysis.tools_transcom import transcommask, ExtendedTCRegions
import netCDF4 as cdf
import da.analysis.tools_transcom as tc
import da.analysis.tools_country as ct
......@@ -781,7 +783,14 @@ def save_weekly_avg_agg_data(dacycle, region_aggregate='olson'):
dimname = 'tc_ext'
dimregs = ncf.add_region_dim(type='tc_ext')
xform = True
elif region_aggregate == "amazon":
regfile = cdf.Dataset(os.path.join(analysisdir,'amazon_mask.nc'))
regionmask = regfile.variables['regionmask'][:]
regfile.close()
dimname = 'amazon'
dimregs = ncf.add_dim(dimname, regionmask.max())
xform = False
elif region_aggregate == "country":
......@@ -1106,6 +1115,7 @@ if __name__ == "__main__":
save_weekly_avg_agg_data(dacycle, region_aggregate='transcom')
save_weekly_avg_agg_data(dacycle, region_aggregate='transcom_extended')
save_weekly_avg_agg_data(dacycle, region_aggregate='country')
save_weekly_avg_agg_data(dacycle, region_aggregate='amazon')
dacycle.advance_cycle_times()
......
......@@ -261,10 +261,12 @@ def write_mole_fractions(dacycle):
# Get existing file obs_nums to determine match to local obs_nums
if ncf_out.variables.has_key('id'):
file_obs_nums = ncf_out.get_variable('id')
if ncf_out.variables.has_key('merge_num'):
file_obs_nums = ncf_out.get_variable('merge_num')
elif ncf_out.variables.has_key('obspack_num'):
file_obs_nums = ncf_out.get_variable('obspack_num')
elif ncf_out.variables.has_key('id'):
file_obs_nums = ncf_out.get_variable('id')
# Get all obs_nums related to this file, determine their indices in the local arrays
......
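
The new lookup prefers merge_num, then obspack_num, then the legacy id variable. Note that has_key() exists only under Python 2; a sketch of the same priority chain using the membership operator, which works on both interpreters (names as in the hunk above):

# try the identifier variables in order of preference; first match wins
for name in ('merge_num', 'obspack_num', 'id'):
    if name in ncf_out.variables:
        file_obs_nums = ncf_out.get_variable(name)
        break
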
......@@ -96,7 +96,8 @@ def summarize_obs(analysisdir, printfmt='html'):
<TH> Lab. </TH> \
<TH> Country </TH> \
<TH> Lat, Lon, Elev. (m ASL) </TH> \
<TH> No. Obs. Avail. </TH> \
<TH> No. Obs. Available </TH> \
<TH> No. Obs. Assimilated </TH> \
<TH> &#8730;R (&mu;mol mol<sup>-1</sup>) </TH> \
<TH> &#8730;HPH (&mu;mol mol<sup>-1</sup>) </TH> \
<TH> H(x)-y (&mu;mol mol<sup>-1</sup>) </TH> \
......@@ -113,6 +114,7 @@ def summarize_obs(analysisdir, printfmt='html'):
<TD>%40s</TD>\
<TD>%s</TD>\
<TD>%d</TD>\
<TD>%d</TD>\
<TD>%+5.2f</TD>\
<TD>%+5.2f</TD>\
<TD>%+5.2f&plusmn;%5.2f</TD>\
......@@ -128,6 +130,8 @@ def summarize_obs(analysisdir, printfmt='html'):
table = []
for infile in infiles:
#if not 'mlo_surface-insitu' in infile: continue
#if not 'poc' in infile: continue
logging.debug( infile )
f = io.CT_CDF(infile, 'read')
date = f.get_variable('time')
......@@ -138,16 +142,46 @@ def summarize_obs(analysisdir, printfmt='html'):
simulated_std = f.get_variable('modelsamplesstandarddeviation_forecast') * 1e6
hphtr = f.get_variable('totalmolefractionvariance_forecast') * 1e6 * 1e6
flag = f.get_variable('flag_forecast')
pydates = [dt.datetime(1970, 1, 1) + dt.timedelta(seconds=int(d)) for d in date]
pydates = np.array(pydates).compress(flag != 2)
simulated_fc = simulated_fc.compress(flag != 2)
simulated = simulated.compress(flag != 2)
obs = obs.compress(flag != 2)
mdm = mdm.compress(flag != 2)
hphtr = hphtr.compress(flag != 2)
obs_avail = len(np.ma.compressed(mdm))
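# note: counted before any filtering, i.e. every sample with a reported model-data mismatch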
pydates = np.array([dt.datetime(1970, 1, 1) + dt.timedelta(seconds=int(d)) for d in date])
sampled = (np.ma.getmaskarray(simulated) == False)
pydates = pydates.compress(sampled)
simulated = simulated.compress(sampled)
simulated_fc = simulated_fc.compress(sampled)
obs = obs.compress(sampled)
mdm = mdm.compress(sampled)
hphtr = hphtr.compress(sampled)
flag = flag.compress(sampled)
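# POC records may carry -9999 fill values in the simulated series; drop them from all arrays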
if f.site_code.upper() == 'POC':
pydates = pydates.compress(simulated > -9000)
simulated_fc = simulated_fc.compress(simulated > -9000)
obs = obs.compress(simulated > -9000)
mdm = mdm.compress(simulated > -9000)
hphtr = hphtr.compress(simulated > -9000)
flag = flag.compress(simulated > -9000)
simulated = simulated.compress(simulated > -9000)
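# an mdm around 1000 appears to act as a sentinel for evaluation-only sites: nothing is assimilated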
if mdm.mean() > 900:
pydates = pydates.compress(flag != 2)
simulated_fc = simulated_fc.compress(flag != 2)
simulated = simulated.compress(flag != 2)
obs = obs.compress(flag != 2)
mdm = mdm.compress(flag != 2)
hphtr = hphtr.compress(flag != 2)
obs_assim = 0
else:
pydates = pydates.compress(flag == 0)
simulated_fc = simulated_fc.compress(flag == 0)
simulated = simulated.compress(flag == 0)
obs = obs.compress(flag == 0)
mdm = mdm.compress(flag == 0)
hphtr = hphtr.compress(flag == 0)
obs_assim = len(np.ma.compressed(mdm))
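# obs_avail vs obs_assim: all sampled records vs only the flag == 0 records the inversion actually used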
summer = [i for i, d in enumerate(pydates) if d.month in [6, 7, 8, 9] ]
winter = [i for i, d in enumerate(pydates) if d.month in [11, 12, 1, 2, 3, 4] ]
......@@ -157,27 +191,40 @@ def summarize_obs(analysisdir, printfmt='html'):
diffstd = ((simulated - obs).std())
diffsummerstd = ((simulated - obs).take(summer).std())
diffwinterstd = ((simulated - obs).take(winter).std())
chi_sq = ((simulated_fc - obs)**2/hphtr).mean()
#chi_sq = ((simulated - obs)**2/mdm).mean()
#chi_summer = ((simulated - obs)**2/mdm).take(summer).mean()
#chi_winter = ((simulated - obs)**2/mdm).take(winter).mean()
#n_summer = simulated.take(summer).shape[0]
#n_winter = simulated.take(winter).shape[0]
#print 'summer: %0.2f, %0.2f, %0.2f, %i'%(diffsummer,diffsummerstd,chi_summer,n_summer)
#print 'winter: %0.2f, %0.2f, %0.2f, %i'%(diffwinter,diffwinterstd,chi_winter,n_winter)
chi_sq = -99
if mdm.mean() < 900:
chi_sq = ((simulated_fc - obs)**2/hphtr).mean()
#chi_sq = ((simulated - obs)**2/mdm).mean()
if mdm.mean() > 900:
chi_clr = '#EEEEEE'
chi_sq = -99
elif chi_sq > 1.2:
chi_clr = '#ff0000'
elif chi_sq < 0.5:
chi_clr = '#ff7f00'
else: chi_clr = '#00cc00'
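# chi-square near 1 means residuals match the forecast error budget (hphtr):
# grey = not assimilated, red = too large (>1.2), orange = too small (<0.5), green = consistent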
location = nice_lat(f.site_latitude,'html') + ', ' + nice_lon(f.site_longitude,'html') + ', ' + nice_alt(f.site_elevation)
if abs(f.site_latitude) > 9999:
location = 'Variable'
else:location = nice_lat(f.site_latitude,'html') + ', ' + nice_lon(f.site_longitude,'html') + ', ' + nice_alt(f.site_elevation)
if 'site_country' in f.ncattrs():
country = f.site_country
else: country = 'Multiple'
if printfmt == 'html':
ss = (f.dataset_name[4:],
f.site_code.upper(),
f.dataset_project,
f.lab_abbr,
f.site_country,
f.lab_1_abbr,
country,
location,
len(np.ma.compressed(mdm)),
obs_avail,
obs_assim,
mdm.mean(),
np.sqrt((simulated_std ** 2).mean()),
diff, diffstd,
......@@ -279,16 +326,16 @@ def make_map(analysisdir): #makes a map of amount of assimilated observations pe
#for lon,lat,name,n in zip(lons,lats,names,nobs):
count = 0
for i in range(len(lats)):
if nobs[i] < 100:
if nobs[i] < 250:
n = 0
c = 'blue'
elif nobs[i] < 200:
elif nobs[i] < 500:
n = 1
c = 'green'
elif nobs[i] < 300:
elif nobs[i] < 750:
n = 2
c = 'orange'
elif nobs[i] < 500:
elif nobs[i] < 1000:
n = 3
c = 'brown'
else:
......@@ -296,19 +343,24 @@ def make_map(analysisdir): #makes a map of amount of assimilated observations pe
c = 'red'
if lons[i] > -900:
x,y = m(lons[i],lats[i])
ax.plot(x,y,'o',color=c,markersize=8+1*n)#,markeredgecolor='k',markeredgewidth=2)
ax.plot(x,y,'o',color=c,markersize=12+1.5*n)#,markeredgecolor='k',markeredgewidth=2)
#ax.annotate(labs[i],xy=m(lons[i],lats[i]),xycoords='data',fontweight='bold')
else:
x,y = m(169,87-count)
ax.plot(x,y,'o',color=c,markersize=8+1*n)
ax.plot(x,y,'o',color=c,markersize=12+1.5*n)
ax.annotate(labs[i],xy=m(172,86-count),xycoords='data',fontweight='bold')
count = count + 4
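# legend: marker size and colour encode the per-site count of assimilated observations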
fig.text(0.08,0.95,u'\u2022: N<100',fontsize=14,color='blue')
fig.text(0.16,0.95,u'\u2022: N<200',fontsize=15,color='green')
fig.text(0.24,0.95,u'\u2022: N<300',fontsize=16,color='orange')
fig.text(0.32,0.95,u'\u2022: N<500',fontsize=17,color='brown')
fig.text(0.40,0.95,u'\u2022: N>500',fontsize=18,color='red')
fig.text(0.15,0.945,u'\u2022',fontsize=35,color='blue')
fig.text(0.16,0.95,': N<250',fontsize=24,color='blue')
fig.text(0.30,0.94,u'\u2022',fontsize=40,color='green')
fig.text(0.31,0.95,': N<500',fontsize=24,color='green')
fig.text(0.45,0.94,u'\u2022',fontsize=45,color='orange')
fig.text(0.46,0.95,': N<750',fontsize=24,color='orange')
fig.text(0.60,0.939,u'\u2022',fontsize=50,color='brown')
fig.text(0.61,0.95,': N<1000',fontsize=24,color='brown')
fig.text(0.75,0.938,u'\u2022',fontsize=55,color='red')
fig.text(0.765,0.95,': N>1000',fontsize=24,color='red')
ax.set_title('Assimilated observations',fontsize=24)
font0= FontProperties(size=15,style='italic',weight='bold')
......@@ -471,7 +523,7 @@ if __name__ == '__main__': # started as script
sys.path.append('../../')
logging.root.setLevel(logging.DEBUG)
analysisdir = "/Storage/CO2/ingrid/carbontracker/geocarbon-ei-sibcasa-gfed4-zoom-gridded-combined/analysis/"
analysisdir = "/Users/ingrid/mnt/promise/CO2/ingrid/carbontracker/cartesius/gcp2-combined/analysis/"
summarize_obs(analysisdir)
#make_map(analysisdir)
......
......@@ -553,8 +553,10 @@ class CycleControl(dict):
nextjobid = '%s'% ( (self['time.end']+cycle*self['cyclelength']).strftime('%Y%m%d'),)
nextrestartfilename = self['da.restart.fname'].replace(jobid,nextjobid)
nextlogfilename = logfile.replace(jobid,nextjobid)
template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,)
#template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s &\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,)
if self.daplatform.ID == 'WU capegrim':
template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s &\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,)
else:
template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,)
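# capegrim backgrounds the next cycle with a trailing '&'; other platforms run it synchronously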
# write and submit
self.daplatform.write_job(jobfile, template, jobid)
......
......@@ -72,7 +72,8 @@ def forward_pipeline(dacycle, platform, dasystem, samples, statevector, obsopera
# This loads the results from another assimilation experiment into the current statevector
if sam:
filename = os.path.join(fwddir, dacycle['time.start'].strftime('%Y%m%d'), 'savestate.nc')
filename = os.path.join(fwddir, dacycle['time.start'].strftime('%Y%m%d'), 'savestate_%s.nc'%dacycle['time.start'].strftime('%Y%m%d'))
#filename = os.path.join(fwddir, dacycle['time.start'].strftime('%Y%m%d'), 'savestate.nc')
statevector.read_from_file_exceptsam(filename, 'prior')
elif not legacy:
filename = os.path.join(fwddir, dacycle['time.start'].strftime('%Y%m%d'), 'savestate_%s.nc'%dacycle['time.start'].strftime('%Y%m%d'))
......@@ -408,7 +409,7 @@ def advance(dacycle, samples, statevector, obsoperator):
logging.debug("Appended ObsOperator restart and output file lists to dacycle for collection ")
dacycle.output_filelist.append(dacycle['ObsOperator.inputfile'])
logging.debug("Appended Observation filename to dacycle for collection ")
logging.debug("Appended Observation filename to dacycle for collection: %s"%(dacycle['ObsOperator.inputfile']))
sampling_coords_file = os.path.join(dacycle['dir.input'], 'sample_coordinates_%s.nc' % dacycle['time.sample.stamp'])
if os.path.exists(sampling_coords_file):
......