Commit c6fa34ee authored by karolina's avatar karolina
Browse files

further cleanup of names: object names to lowercase

parent 5bb5096d
......@@ -38,26 +38,26 @@ def proceed_dialog(txt, yes=['y', 'yes'], all=['a', 'all', 'yes-to-all']):
return 2
return 0
def save_weekly_avg_1x1_data(DaCycle, StateVector):
def save_weekly_avg_1x1_data(dacycle, statevector):
"""
Function creates a NetCDF file with output on 1x1 degree grid. It uses the flux data written by the
:class:`~da.baseclasses.obsoperator.ObsOperator.py`, and multiplies these with the mapped parameters and
variance (not covariance!) from the :class:`~da.baseclasses.statevector.StateVector`.
:param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
:param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
:param dacycle: a :class:`~da.tools.initexit.CycleControl` object
:param statevector: a :class:`~da.baseclasses.statevector.StateVector`
:rtype: None
"""
#
dirname = create_dirs(os.path.join(DaCycle['dir.analysis'], 'data_flux1x1_weekly'))
dirname = create_dirs(os.path.join(dacycle['dir.analysis'], 'data_flux1x1_weekly'))
#
# Some help variables
#
dectime0 = date2num(datetime(2000, 1, 1))
dt = DaCycle['cyclelength']
startdate = DaCycle['time.start']
enddate = DaCycle['time.end']
nlag = StateVector.nlag
dt = dacycle['cyclelength']
startdate = dacycle['time.start']
enddate = dacycle['time.end']
nlag = statevector.nlag
logging.debug("DA Cycle start date is %s" % startdate.strftime('%Y-%m-%d %H:%M'))
logging.debug("DA Cycle end date is %s" % enddate.strftime('%Y-%m-%d %H:%M'))
......@@ -72,7 +72,7 @@ def save_weekly_avg_1x1_data(DaCycle, StateVector):
# Create dimensions and lat/lon grid
#
dimgrid = ncf.add_latlon_dim()
dimensemble = ncf.add_dim('members', StateVector.nmembers)
dimensemble = ncf.add_dim('members', statevector.nmembers)
dimdate = ncf.add_date_dim()
#
# set title and tell GMT that we are using "pixel registration"
......@@ -91,14 +91,14 @@ def save_weekly_avg_1x1_data(DaCycle, StateVector):
#
# if not, process this cycle. Start by getting flux input data from CTDAS
#
filename = os.path.join(DaCycle['dir.output'], 'flux1x1_%s_%s.nc' % (startdate.strftime('%Y%m%d%H'), enddate.strftime('%Y%m%d%H')))
filename = os.path.join(dacycle['dir.output'], 'flux1x1_%s_%s.nc' % (startdate.strftime('%Y%m%d%H'), enddate.strftime('%Y%m%d%H')))
file = io.CT_Read(filename, 'read')
bio = np.array(file.get_variable(DaCycle.DaSystem['background.co2.bio.flux']))
ocean = np.array(file.get_variable(DaCycle.DaSystem['background.co2.ocean.flux']))
fire = np.array(file.get_variable(DaCycle.DaSystem['background.co2.fires.flux']))
fossil = np.array(file.get_variable(DaCycle.DaSystem['background.co2.fossil.flux']))
#mapped_parameters = np.array(file.get_variable(DaCycle.DaSystem['final.param.mean.1x1']))
bio = np.array(file.get_variable(dacycle.dasystem['background.co2.bio.flux']))
ocean = np.array(file.get_variable(dacycle.dasystem['background.co2.ocean.flux']))
fire = np.array(file.get_variable(dacycle.dasystem['background.co2.fires.flux']))
fossil = np.array(file.get_variable(dacycle.dasystem['background.co2.fossil.flux']))
#mapped_parameters = np.array(file.get_variable(dacycle.dasystem['final.param.mean.1x1']))
file.close()
next = ncf.inq_unlimlen()[0]
......@@ -108,7 +108,7 @@ def save_weekly_avg_1x1_data(DaCycle, StateVector):
for prior in [True, False]:
#
# Now fill the StateVector with the prior values for this time step. Note that the prior value for this time step
# Now fill the statevector with the prior values for this time step. Note that the prior value for this time step
# occurred nlag time steps ago, so we make a shift in the output directory, but only if we are more than nlag cycles away from the start date..
#
......@@ -116,24 +116,24 @@ def save_weekly_avg_1x1_data(DaCycle, StateVector):
qual_short = 'prior'
for n in range(nlag, 0, -1):
priordate = enddate - timedelta(dt.days * n)
savedir = DaCycle['dir.output'].replace(startdate.strftime('%Y%m%d'), priordate.strftime('%Y%m%d'))
savedir = dacycle['dir.output'].replace(startdate.strftime('%Y%m%d'), priordate.strftime('%Y%m%d'))
filename = os.path.join(savedir, 'savestate_%s.nc' % priordate.strftime('%Y%m%d'))
if os.path.exists(filename):
StateVector.read_from_file(filename, qual=qual_short)
gridmean, gridensemble = StateVector.state_to_grid(lag=n)
statevector.read_from_file(filename, qual=qual_short)
gridmean, gridensemble = statevector.state_to_grid(lag=n)
# Replace the mean statevector by all ones (assumed priors)
gridmean = StateVector.vector2grid(vectordata=np.ones(StateVector.nparams,))
gridmean = statevector.vector2grid(vectordata=np.ones(statevector.nparams,))
logging.debug('Read prior dataset from file %s, sds %d: ' % (filename, n))
break
else:
qual_short = 'opt'
savedir = DaCycle['dir.output']
savedir = dacycle['dir.output']
filename = os.path.join(savedir, 'savestate_%s.nc' % startdate.strftime('%Y%m%d'))
StateVector.read_from_file(filename, qual=qual_short)
gridmean, gridensemble = StateVector.state_to_grid(lag=1)
statevector.read_from_file(filename, qual=qual_short)
gridmean, gridensemble = statevector.state_to_grid(lag=1)
logging.debug('Read posterior dataset from file %s, sds %d: ' % (filename, 1))
#
......@@ -213,29 +213,29 @@ def save_weekly_avg_1x1_data(DaCycle, StateVector):
return saveas
def save_weekly_avg_state_data(DaCycle, StateVector):
def save_weekly_avg_state_data(dacycle, statevector):
"""
Function creates a NetCDF file with output for all parameters. It uses the flux data written by the
:class:`~da.baseclasses.obsoperator.ObsOperator.py`, and multiplies these with the mapped parameters and
variance (not covariance!) from the :class:`~da.baseclasses.statevector.StateVector`.
:param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
:param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
:param dacycle: a :class:`~da.tools.initexit.CycleControl` object
:param statevector: a :class:`~da.baseclasses.statevector.StateVector`
:rtype: None
"""
dirname = create_dirs(os.path.join(DaCycle['dir.analysis'], 'data_state_weekly'))
dirname = create_dirs(os.path.join(dacycle['dir.analysis'], 'data_state_weekly'))
#
# Some help variables
#
dectime0 = date2num(datetime(2000, 1, 1))
dt = DaCycle['cyclelength']
startdate = DaCycle['time.start']
enddate = DaCycle['time.end']
nlag = StateVector.nlag
dt = dacycle['cyclelength']
startdate = dacycle['time.start']
enddate = dacycle['time.end']
nlag = statevector.nlag
area = globarea()
vectorarea = StateVector.grid2vector(griddata=area, method='sum')
vectorarea = statevector.grid2vector(griddata=area, method='sum')
logging.debug("DA Cycle start date is %s" % startdate.strftime('%Y-%m-%d %H:%M'))
logging.debug("DA Cycle end date is %s" % enddate.strftime('%Y-%m-%d %H:%M'))
......@@ -249,8 +249,8 @@ def save_weekly_avg_state_data(DaCycle, StateVector):
#
# Create dimensions and lat/lon grid
#
dimregs = ncf.add_dim('nparameters', StateVector.nparams)
dimmembers = ncf.add_dim('nmembers', StateVector.nmembers)
dimregs = ncf.add_dim('nparameters', statevector.nparams)
dimmembers = ncf.add_dim('nmembers', statevector.nmembers)
dimdate = ncf.add_date_dim()
#
# set title and tell GMT that we are using "pixel registration"
......@@ -270,57 +270,51 @@ def save_weekly_avg_state_data(DaCycle, StateVector):
#
# if not, process this cycle. Start by getting flux input data from CTDAS
#
filename = os.path.join(DaCycle['dir.output'], 'flux1x1_%s_%s.nc' % (startdate.strftime('%Y%m%d%H'), enddate.strftime('%Y%m%d%H')))
filename = os.path.join(dacycle['dir.output'], 'flux1x1_%s_%s.nc' % (startdate.strftime('%Y%m%d%H'), enddate.strftime('%Y%m%d%H')))
file = io.CT_Read(filename, 'read')
bio = np.array(file.get_variable(DaCycle.DaSystem['background.co2.bio.flux']))
ocean = np.array(file.get_variable(DaCycle.DaSystem['background.co2.ocean.flux']))
fire = np.array(file.get_variable(DaCycle.DaSystem['background.co2.fires.flux']))
fossil = np.array(file.get_variable(DaCycle.DaSystem['background.co2.fossil.flux']))
#mapped_parameters = np.array(file.get_variable(DaCycle.DaSystem['final.param.mean.1x1']))
bio = np.array(file.get_variable(dacycle.dasystem['background.co2.bio.flux']))
ocean = np.array(file.get_variable(dacycle.dasystem['background.co2.ocean.flux']))
fire = np.array(file.get_variable(dacycle.dasystem['background.co2.fires.flux']))
fossil = np.array(file.get_variable(dacycle.dasystem['background.co2.fossil.flux']))
#mapped_parameters = np.array(file.get_variable(dacycle.dasystem['final.param.mean.1x1']))
file.close()
next = ncf.inq_unlimlen()[0]
vectorbio = StateVector.grid2vector(griddata=bio * area, method='sum')
vectorocn = StateVector.grid2vector(griddata=ocean * area, method='sum')
vectorfire = StateVector.grid2vector(griddata=fire * area, method='sum')
vectorfossil = StateVector.grid2vector(griddata=fossil * area, method='sum')
vectorbio = statevector.grid2vector(griddata=bio * area, method='sum')
vectorocn = statevector.grid2vector(griddata=ocean * area, method='sum')
vectorfire = statevector.grid2vector(griddata=fire * area, method='sum')
vectorfossil = statevector.grid2vector(griddata=fossil * area, method='sum')
# Start adding datasets from here on, both prior and posterior datasets for bio and ocn
for prior in [True, False]:
#
# Now fill the StateVector with the prior values for this time step. Note that the prior value for this time step
# Now fill the statevector with the prior values for this time step. Note that the prior value for this time step
# occurred nlag time steps ago, so we make a shift in the output directory, but only if we are more than nlag cycles away from the start date..
#
if prior:
qual_short = 'prior'
for n in range(nlag, 0, -1):
priordate = enddate - timedelta(dt.days * n)
savedir = DaCycle['dir.output'].replace(startdate.strftime('%Y%m%d'), priordate.strftime('%Y%m%d'))
savedir = dacycle['dir.output'].replace(startdate.strftime('%Y%m%d'), priordate.strftime('%Y%m%d'))
filename = os.path.join(savedir,'savestate_%s.nc' % priordate.strftime('%Y%m%d'))
if os.path.exists(filename):
StateVector.read_from_file(filename, qual=qual_short)
statevector.read_from_file(filename, qual=qual_short)
# Replace the mean statevector by all ones (assumed priors)
statemean = np.ones((StateVector.nparams,))
statemean = np.ones((statevector.nparams,))
choicelag = n
logging.debug('Read prior dataset from file %s, lag %d: ' % (filename, choicelag))
break
else:
qual_short = 'opt'
savedir = DaCycle['dir.output']
savedir = dacycle['dir.output']
filename = os.path.join(savedir, 'savestate_%s.nc' % startdate.strftime('%Y%m%d'))
StateVector.read_from_file(filename)
statevector.read_from_file(filename)
choicelag = 1
statemean = StateVector.EnsembleMembers[choicelag - 1][0].ParameterValues
statemean = statevector.ensemble_members[choicelag - 1][0].param_values
logging.debug('Read posterior dataset from file %s, lag %d: ' % (filename, choicelag))
#
# if prior, do not multiply fluxes with parameters, otherwise do
......@@ -342,8 +336,8 @@ def save_weekly_avg_state_data(DaCycle, StateVector):
# which are assumed 1.0 in the prior always.
#
members = StateVector.EnsembleMembers[choicelag - 1]
deviations = np.array([mem.ParameterValues * vectorbio for mem in members])
members = statevector.ensemble_members[choicelag - 1]
deviations = np.array([mem.param_values * vectorbio for mem in members])
deviations = deviations - deviations[0, :]
savedict = ncf.standard_var(varname='bio_flux_%s_ensemble' % qual_short)
......@@ -383,7 +377,7 @@ def save_weekly_avg_state_data(DaCycle, StateVector):
# which are assumed 1.0 in the prior always.
#
deviations = np.array([mem.ParameterValues * vectorocn for mem in members])
deviations = np.array([mem.param_values * vectorocn for mem in members])
deviations = deviations - deviations[0, :]
savedict = ncf.standard_var(varname='ocn_flux_%s_ensemble' % qual_short)
......@@ -440,30 +434,30 @@ def save_weekly_avg_state_data(DaCycle, StateVector):
return saveas
def save_weekly_avg_tc_data(DaCycle, StateVector):
def save_weekly_avg_tc_data(dacycle, statevector):
"""
Function creates a NetCDF file with output on TransCom regions. It uses the flux input from the
function `save_weekly_avg_1x1_data` to create fluxes of length `nparameters`, which are then projected
onto TC regions using the internal methods from :class:`~da.baseclasses.statevector.StateVector`.
:param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
:param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
:param dacycle: a :class:`~da.tools.initexit.CycleControl` object
:param statevector: a :class:`~da.baseclasses.statevector.StateVector`
:rtype: None
This function only reads the prior fluxes from the flux_1x1.nc files created before, because we want to convolve
these with the parameters in the StateVector. This creates posterior fluxes, and the posterior covariance for the complete
StateVector in units of mol/box/s which we then turn into TC fluxes and covariances.
these with the parameters in the statevector. This creates posterior fluxes, and the posterior covariance for the complete
statevector in units of mol/box/s which we then turn into TC fluxes and covariances.
"""
#
dirname = create_dirs(os.path.join(DaCycle['dir.analysis'], 'data_tc_weekly'))
dirname = create_dirs(os.path.join(dacycle['dir.analysis'], 'data_tc_weekly'))
#
# Some help variables
#
dectime0 = date2num(datetime(2000, 1, 1))
dt = DaCycle['cyclelength']
startdate = DaCycle['time.start']
enddate = DaCycle['time.end']
dt = dacycle['cyclelength']
startdate = dacycle['time.start']
enddate = dacycle['time.end']
ncfdate = date2num(startdate) - dectime0 + dt.days / 2.0
logging.debug("DA Cycle start date is %s" % startdate.strftime('%Y-%m-%d %H:%M'))
......@@ -492,7 +486,7 @@ def save_weekly_avg_tc_data(DaCycle, StateVector):
area = globarea()
infile = os.path.join(DaCycle['dir.analysis'], 'data_state_weekly', 'statefluxes.nc')
infile = os.path.join(dacycle['dir.analysis'], 'data_state_weekly', 'statefluxes.nc')
if not os.path.exists(infile):
logging.error("Needed input file (%s) does not exist yet, please create file first, returning..." % infile)
return None
......@@ -501,7 +495,7 @@ def save_weekly_avg_tc_data(DaCycle, StateVector):
# Transform data one by one
# Get the date variable, and find index corresponding to the DaCycle date
# Get the date variable, and find index corresponding to the dacycle date
try:
dates = ncf_in.variables['date'][:]
......@@ -537,13 +531,13 @@ def save_weekly_avg_tc_data(DaCycle, StateVector):
elif 'ensemble' in vname:
tcdata = []
for member in data:
tcdata.append(StateVector.vector2tc(vectordata=member))
tcdata.append(statevector.vector2tc(vectordata=member))
tcdata = np.array(tcdata)
try:
cov = tcdata.transpose().dot(tcdata) / (StateVector.nmembers - 1)
cov = tcdata.transpose().dot(tcdata) / (statevector.nmembers - 1)
except:
cov = np.dot(tcdata.transpose(), tcdata) / (StateVector.nmembers - 1) # Huygens fix
cov = np.dot(tcdata.transpose(), tcdata) / (statevector.nmembers - 1) # Huygens fix
#print vname,cov.sum()
......@@ -555,7 +549,7 @@ def save_weekly_avg_tc_data(DaCycle, StateVector):
else:
tcdata = StateVector.vector2tc(vectordata=data) # vector to TC
tcdata = statevector.vector2tc(vectordata=data) # vector to TC
savedict = ncf.standard_var(varname=vname)
savedict['dims'] = dimdate + dimregs
......@@ -572,7 +566,7 @@ def save_weekly_avg_tc_data(DaCycle, StateVector):
return saveas
def save_weekly_avg_ext_tc_data(DaCycle):
def save_weekly_avg_ext_tc_data(dacycle):
""" Function SaveTCDataExt saves surface flux data to NetCDF files for extended TransCom regions
*** Inputs ***
......@@ -586,14 +580,14 @@ def save_weekly_avg_ext_tc_data(DaCycle):
#
dirname = create_dirs(os.path.join(DaCycle['dir.analysis'], 'data_tc_weekly'))
dirname = create_dirs(os.path.join(dacycle['dir.analysis'], 'data_tc_weekly'))
#
# Some help variables
#
dectime0 = date2num(datetime(2000, 1, 1))
dt = DaCycle['cyclelength']
startdate = DaCycle['time.start']
enddate = DaCycle['time.end']
dt = dacycle['cyclelength']
startdate = dacycle['time.start']
enddate = dacycle['time.end']
ncfdate = date2num(startdate) - dectime0 + dt.days / 2.0
logging.debug("DA Cycle start date is %s" % startdate.strftime('%Y-%m-%d %H:%M'))
......@@ -617,7 +611,7 @@ def save_weekly_avg_ext_tc_data(DaCycle):
if skip:
logging.warning('Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'), saveas))
else:
infile = os.path.join(DaCycle['dir.analysis'], 'data_tc_weekly', 'tcfluxes.nc')
infile = os.path.join(dacycle['dir.analysis'], 'data_tc_weekly', 'tcfluxes.nc')
if not os.path.exists(infile):
logging.error("Needed input file (%s) does not exist yet, please create file first, returning..." % infile)
return None
......@@ -626,7 +620,7 @@ def save_weekly_avg_ext_tc_data(DaCycle):
# Transform data one by one
# Get the date variable, and find index corresponding to the DaCycle date
# Get the date variable, and find index corresponding to the dacycle date
try:
dates = ncf_in.variables['date'][:]
......@@ -688,30 +682,30 @@ def save_weekly_avg_ext_tc_data(DaCycle):
return saveas
def save_weekly_avg_agg_data(DaCycle, region_aggregate='olson'):
def save_weekly_avg_agg_data(dacycle, region_aggregate='olson'):
"""
Function creates a NetCDF file with output on TransCom regions. It uses the flux input from the
function `save_weekly_avg_1x1_data` to create fluxes of length `nparameters`, which are then projected
onto TC regions using the internal methods from :class:`~da.baseclasses.statevector.StateVector`.
:param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
:param dacycle: a :class:`~da.tools.initexit.CycleControl` object
:param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
:rtype: None
This function only reads the prior fluxes from the flux_1x1.nc files created before, because we want to convolve
these with the parameters in the StateVector. This creates posterior fluxes, and the posterior covariance for the complete
StateVector in units of mol/box/s which we then turn into TC fluxes and covariances.
these with the parameters in the statevector. This creates posterior fluxes, and the posterior covariance for the complete
statevector in units of mol/box/s which we then turn into TC fluxes and covariances.
"""
#
dirname = create_dirs(os.path.join(DaCycle['dir.analysis'], 'data_%s_weekly' % region_aggregate))
dirname = create_dirs(os.path.join(dacycle['dir.analysis'], 'data_%s_weekly' % region_aggregate))
#
# Some help variables
#
dectime0 = date2num(datetime(2000, 1, 1))
dt = DaCycle['cyclelength']
startdate = DaCycle['time.start']
enddate = DaCycle['time.end']
dt = dacycle['cyclelength']
startdate = dacycle['time.start']
enddate = dacycle['time.end']
ncfdate = date2num(startdate) - dectime0 + dt.days / 2.0
logging.debug("DA Cycle start date is %s" % startdate.strftime('%Y-%m-%d %H:%M'))
......@@ -754,13 +748,10 @@ def save_weekly_avg_agg_data(DaCycle, region_aggregate='olson'):
elif region_aggregate == "country":
countrydict = ct.get_countrydict()
selected = ['Russia', 'Canada', 'China', 'United States', 'EU27', 'Brazil', 'Australia', 'India'] #,'G8','UNFCCC_annex1','UNFCCC_annex2']
regionmask = np.zeros((180, 360,), 'float')
for i, name in enumerate(selected):
lab = 'Country_%03d' % (i + 1,)
setattr(ncf, lab, name)
......@@ -812,7 +803,7 @@ def save_weekly_avg_agg_data(DaCycle, region_aggregate='olson'):
area = globarea()
infile = os.path.join(DaCycle['dir.analysis'], 'data_flux1x1_weekly', 'flux_1x1.%s.nc' % startdate.strftime('%Y-%m-%d'))
infile = os.path.join(dacycle['dir.analysis'], 'data_flux1x1_weekly', 'flux_1x1.%s.nc' % startdate.strftime('%Y-%m-%d'))
if not os.path.exists(infile):
logging.error("Needed input file (%s) does not exist yet, please create file first, returning..." % infile)
return None
......@@ -821,7 +812,7 @@ def save_weekly_avg_agg_data(DaCycle, region_aggregate='olson'):
# Transform data one by one
# Get the date variable, and find index corresponding to the DaCycle date
# Get the date variable, and find index corresponding to the dacycle date
try:
dates = ncf_in.variables['date'][:]
......@@ -900,7 +891,7 @@ def save_weekly_avg_agg_data(DaCycle, region_aggregate='olson'):
return saveas
def save_time_avg_data(DaCycle, infile, avg='monthly'):
def save_time_avg_data(dacycle, infile, avg='monthly'):
""" Function saves time mean surface flux data to NetCDF files
*** Inputs ***
......@@ -920,7 +911,7 @@ def save_time_avg_data(DaCycle, infile, avg='monthly'):
intime = 'yearly'
dirname, filename = os.path.split(infile)
outdir = create_dirs(os.path.join(DaCycle['dir.analysis'], dirname.replace(intime, avg)))
outdir = create_dirs(os.path.join(dacycle['dir.analysis'], dirname.replace(intime, avg)))
dectime0 = date2num(datetime(2000, 1, 1))
......@@ -1024,8 +1015,6 @@ def save_time_avg_data(DaCycle, infile, avg='monthly'):
return saveas
if __name__ == "__main__":
import logging
from da.tools.initexit import CycleControl
from da.carbondioxide.dasystem import CO2DaSystem
from da.carbondioxide.statevector import CO2StateVector
......@@ -1034,37 +1023,35 @@ if __name__ == "__main__":
logging.root.setLevel(logging.DEBUG)
DaCycle = CycleControl(args={'rc':'../../ctdas-od-gfed2-nam1x1-obspack-full-gridded.rc'})
DaCycle.initialize()
DaCycle.parse_times()
DaSystem = CO2DaSystem('../rc/carbontracker_ct09_opfnew.rc')
dacycle = CycleControl(args={'rc':'../../ctdas-od-gfed2-nam1x1-obspack-full-gridded.rc'})
dacycle.initialize()
dacycle.parse_times()
DaCycle.DaSystem = DaSystem
dasystem = CO2DaSystem('../rc/carbontracker_ct09_opfnew.rc')
StateVector = CO2StateVector()
StateVector.initialize(DaCycle)
dacycle.dasystem = dasystem
while DaCycle['time.end'] < DaCycle['time.finish']:
statevector = CO2StateVector()
statevector.initialize(dacycle)
savedas_1x1 = save_weekly_avg_1x1_data(DaCycle, StateVector)
savedas_state = save_weekly_avg_state_data(DaCycle, StateVector)
savedas_tc = save_weekly_avg_tc_data(DaCycle, StateVector)
savedas_tcext = save_weekly_avg_ext_tc_data(DaCycle)
savedas_olson = save_weekly_avg_agg_data(DaCycle, region_aggregate='olson')
savedas_transcom = save_weekly_avg_agg_data(DaCycle, region_aggregate='transcom')
savedas_country = save_weekly_avg_agg_data(DaCycle, region_aggregate='country')
while dacycle['time.end'] < dacycle['time.finish']:
savedas_1x1 = save_weekly_avg_1x1_data(dacycle, statevector)
savedas_state = save_weekly_avg_state_data(dacycle, statevector)
savedas_tc = save_weekly_avg_tc_data(dacycle, statevector)
savedas_tcext = save_weekly_avg_ext_tc_data(dacycle)
savedas_olson = save_weekly_avg_agg_data(dacycle, region_aggregate='olson')
savedas_transcom = save_weekly_avg_agg_data(dacycle, region_aggregate='transcom')
savedas_country = save_weekly_avg_agg_data(dacycle, region_aggregate='country')
DaCycle.advance_cycle_times()
dacycle.advance_cycle_times()
StateVector = None # free memory
statevector = None # free memory
for avg in ['monthly', 'yearly', 'longterm']:
savedas_1x1 = save_time_avg_data(DaCycle, savedas_1x1, avg)
savedas_state = save_time_avg_data(DaCycle, savedas_state, avg)
savedas_tc = save_time_avg_data(DaCycle, savedas_tc, avg)
savedas_tcext = save_time_avg_data(DaCycle, savedas_tcext, avg)
savedas_1x1 = save_time_avg_data(dacycle, savedas_1x1, avg)
savedas_state = save_time_avg_data(dacycle, savedas_state, avg)
savedas_tc = save_time_avg_data(dacycle, savedas_tc, avg)
savedas_tcext = save_time_avg_data(dacycle, savedas_tcext, avg)
sys.exit(0)
......@@ -23,7 +23,7 @@ File created on 11 May 2012.
"""
def write_mixing_ratios(DaCycle):
def write_mixing_ratios(dacycle):
"""
Write Sample information to NetCDF files. These files are organized by site and
......@@ -40,23 +40,23 @@ def write_mixing_ratios(DaCycle):
"""
dirname = create_dirs(os.path.join(DaCycle['dir.analysis'], 'data_molefractions'))
dirname = create_dirs(os.path.join(dacycle['dir.analysis'], 'data_molefractions'))
#
# Some help variables
#
dectime0 = date2num(datetime(2000, 1, 1))
dt = DaCycle['cyclelength']
startdate = DaCycle['time.start']
enddate = DaCycle['time.end']
dt = dacycle['cyclelength']
startdate = dacycle['time.start']
enddate = dacycle['time.end']
logging.debug("DA Cycle start date is %s" % startdate.strftime('%Y-%m-%d %H:%M'))
logging.debug("DA Cycle end date is %s" % enddate.strftime('%Y-%m-%d %H:%M'))
DaCycle['time.sample.stamp'] = "%s_%s" % (startdate.strftime("%Y%m%d%H"), enddate.strftime("%Y%m%d%H"),)
dacycle['time.sample.stamp'] = "%s_%s" % (startdate.strftime("%Y%m%d%H"), enddate.strftime("%Y%m%d%H"),)
# Step (1): Get the posterior sample output data file for this cycle
infile = os.path.join(DaCycle['dir.output'], 'sampleinfo_%s__newstyle.nc' % DaCycle['time.sample.stamp'])
infile = os.path.join(dacycle['dir.output'], 'sampleinfo_%s__newstyle.nc' % dacycle['time.sample.stamp'])
ncf_in = io.CT_Read(infile, 'read')
......@@ -72,7 +72,7 @@ def write_mixing_ratios(DaCycle):
# Step (2): Get the prior sample output data file for this cycle
infile = os.path.join(DaCycle['dir.output'], 'optimizer.%s.nc' % startdate.strftime('%Y%m%d'))
infile = os.path.join(dacycle['dir.output'], 'optimizer.%s.nc' % startdate.strftime('%Y%m%d'))
if os.path.exists(infile):
optimized_present = True
......@@ -88,13 +88,13 @@ def write_mixing_ratios(DaCycle):
fc_simulated = ncf_fc_in.get_variable('modelsamplesmean_prior')
fc_simulated_ens = ncf_fc_in.get_variable('modelsamplesdeviations_prior')
fc_flag = ncf_fc_in.get_variable('flag')
if not DaCycle.DaSystem.has_key('opt.algorithm'):
if not dacycle.dasystem.has_key('opt.algorithm'):
fc_r = ncf_fc_in.get_variable('modeldatamismatchvariance')
fc_hphtr = ncf_fc_in.get_variable('totalmolefractionvariance')
elif DaCycle.DaSystem['opt.algorithm'] == 'serial':
elif dacycle.dasystem['opt.algorithm'] == 'serial':
fc_r = ncf_fc_in.get_variable('modeldatamismatchvariance')
fc_hphtr = ncf_fc_in.get_variable('totalmolefractionvariance')
elif DaCycle.DaSystem['opt.algorithm'] == 'bulk':
elif dacycle.dasystem['opt.algorithm'] == 'bulk':
fc_r = ncf_fc_in.get_variable('modeldatamismatchvariance').diagonal()
fc_hphtr = ncf_fc_in.get_variable('totalmolefractionvariance').diagonal()
filesitecode = ncf_fc_in.get_variable('sitecode')
......@@ -136,11 +136,11 @@ def write_mixing_ratios(DaCycle):
ncf_out.History = '\nOriginal observation file modified by user %s on %s\n' % (os.environ['USER'], datetime.today().strftime('%F'),)
ncf_out.CTDAS_info = 'Simulated values added from a CTDAS run by %s on %s\n' % (os.environ['USER'], datetime.today().strftime('%F'),)\
+ '\nCTDAS was run on platform %s' % (os.environ['HOST'],)\
+ '\nCTDAS job directory was %s' % (DaCycle['dir.da_run'],)\
+ '\nCTDAS Da System was %s' % (DaCycle['da.system'],)\