Commit 8ffdbf8c authored by brunner

no cdo afternoon mean

parent 640e5e92
@@ -29,17 +29,16 @@
 ! The time for which to start and end the data assimilation experiment in format YYYY-MM-DD HH:MM:SS
 ! the following 3 lines are for initial start
-time.start : 2019-01-01 00:00:00
-time.finish : 2019-01-07 23:00:00
-time.end : 2019-01-07 23:00:00
-abs.time.start : 2019-01-01 00:00:00
+time.start : 2019-03-01 00:00:00
+time.finish : 2019-03-07 23:00:00
+time.end : 2019-03-07 23:00:00
+abs.time.start : 2019-03-01 00:00:00
 ! Whether to restart the CTDAS system from a previous cycle, or to start the sequence fresh. Valid entries are T/F/True/False/TRUE/FALSE
 time.restart : F
-!da.restart.tstamp : 2013-01-08 00:00:00
-da.restart.tstamp : 2013-01-01 00:00:00
+! da.restart.tstamp : 2019-03-08 00:00:00
+da.restart.tstamp : 2019-03-01 00:00:00
 ! The length of a cycle is given in days, such that the integer 7 denotes the typically used weekly cycle. Valid entries are integers > 1
@@ -54,6 +53,7 @@ time.nlag : 2
 run.name : real
 dir.da_run : /scratch/snx3000/parsenov/${run.name}
+dir.ct_save : /store/empa/em05/parsenov/ct_data/${run.name}/
 restartmap.dir : ${dir.da_run}/input
 ! The resources used to complete the data assimilation experiment. This depends on your computing platform.
@@ -86,6 +86,7 @@ da.system : CarbonTracker
 ! The specific settings for your system are read from a separate rc-file, which points to the data directories, observations, etc
 da.system.rc : da/rc/carbontracker_cosmo.rc
+locations : /store/empa/em05/parsenov/ct_data/locations.csv
 ! This flag should probably be moved to the da.system.rc file. It denotes which type of filtering to use in the optimizer
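The rc changes above shift the assimilation window from January to March 2019, add dir.ct_save as a separate output directory on /store, and point the system to a locations.csv file. As a hedged illustration of how such `key : value` settings drive the cycling, a minimal sketch follows; it is not the CTDAS rc reader, and the `time.cycle` key name is an assumption based on the comment about the 7-day cycle length.

```python
# Minimal sketch, not the CTDAS parser: read a few "key : value" settings like
# the ones above and lay out the weekly cycles of the new March 2019 window.
from datetime import datetime, timedelta

rc_text = """
! comments start with '!'
time.start  : 2019-03-01 00:00:00
time.finish : 2019-03-07 23:00:00
time.cycle  : 7
"""

settings = {}
for line in rc_text.splitlines():
    line = line.split('!')[0].strip()        # drop comments and surrounding whitespace
    if ':' in line:
        key, value = line.split(':', 1)      # only the first ':' separates key and value
        settings[key.strip()] = value.strip()

start  = datetime.strptime(settings['time.start'],  '%Y-%m-%d %H:%M:%S')
finish = datetime.strptime(settings['time.finish'], '%Y-%m-%d %H:%M:%S')
cycle  = timedelta(days=int(settings['time.cycle']))

t = start
while t < finish:
    print('cycle starts', t)                 # one weekly cycle: 2019-03-01 00:00:00
    t += cycle
```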
@@ -85,5 +85,3 @@ save_weekly_avg_ext_tc_data(dacycle)
 write_mole_fractions(dacycle)
 sys.exit(0)
@@ -313,6 +313,8 @@ class Optimizer(object):
     def serial_minimum_least_squares(self):
         """ Make minimum least squares solution by looping over obs"""
+        # Corrected for bias over all stations
+        bias = np.mean(self.obs) - np.mean(self.Hx)
         for n in range(self.nobs):
             # Screen for flagged observations (for instance site not found, or no sample written from model)
@@ -322,13 +324,15 @@ class Optimizer(object):
                 continue
             # Screen for outliers greather than 3x model-data mismatch, only apply if obs may be rejected
+            # Calculate residual for rejecting the observations (corrected for bias) - res_rej
+            res_rej = self.obs[n] - self.Hx[n] - bias
             res = self.obs[n] - self.Hx[n]
             if self.may_reject[n]:
                 threshold = self.rejection_threshold * np.sqrt(self.R[n])
-                if np.abs(res) > threshold:
-                    logging.debug('Rejecting observation (%s,%i) because residual (%f) exceeds threshold (%f)' % (self.sitecode[n], self.obs_ids[n], res, threshold))
+                #if np.abs(res) > threshold + abs(bias):
+                if np.abs(res_rej) > threshold:
+                    logging.debug('Rejecting observation (%s,%i) because residual (%f) exceeds threshold (%f)' % (self.sitecode[n], self.obs_ids[n], res, threshold + abs(bias)))
                     self.flags[n] = 2
                     continue
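The two Optimizer hunks above introduce a residual that is corrected for the mean bias over all stations before the 3-sigma outlier screen is applied, so a constant model-observation offset no longer triggers rejections. A self-contained sketch of that logic (the numbers are invented; only the attribute names and the test mirror the diff):

```python
# Standalone sketch of the bias-corrected outlier screen; the values are made
# up, the logic follows the change in serial_minimum_least_squares.
import numpy as np

obs = np.array([413.0, 415.0, 418.0, 434.0])   # observed mole fractions
Hx  = np.array([408.0, 410.0, 413.0, 414.0])   # simulated equivalents H(x)
R   = np.full(4, 1.5 ** 2)                     # model-data-mismatch variance
rejection_threshold = 3.0

bias = np.mean(obs) - np.mean(Hx)              # common offset over all stations (8.75)

res     = obs - Hx                             # raw residual, still used in the update
res_rej = obs - Hx - bias                      # bias-corrected residual, used for rejection

threshold = rejection_threshold * np.sqrt(R)   # 4.5 for every observation here
print(np.abs(res) > threshold)                 # [ True  True  True  True] -> all would be rejected
print(np.abs(res_rej) > threshold)             # [False False False  True] -> only the real outlier
```

Note that the new logging call reports `threshold + abs(bias)` (the commented-out alternative form of the test), while the comparison itself checks the bias-corrected residual against the unmodified threshold.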
@@ -64,17 +64,29 @@ class CO2StateVector(StateVector):
         fullcov = np.zeros(shape=(90,90))
+        # partcov = np.array([ \
+        # (0.64, 0.36, 0.16, 0.16, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
+        # (0.36, 0.64, 0.16, 0.16, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
+        # (0.16, 0.16, 0.64, 0.36, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
+        # (0.16, 0.16, 0.36, 0.64, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
+        # (0.16, 0.16, 0.16, 0.16, 0.64, 0.36, 0.04, 0.04, 0.04, 0.01), \
+        # (0.16, 0.16, 0.16, 0.16, 0.36, 0.64, 0.04, 0.04, 0.04, 0.01), \
+        # (0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.64, 0.16, 0.16, 0.16), \
+        # (0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.16, 0.64, 0.16, 0.16), \
+        # (0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.16, 0.16, 0.64, 0.16), \
+        # (0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.16, 0.16, 0.16, 0.64) ])
         partcov = np.array([ \
-            (0.64, 0.36, 0.16, 0.16, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
-            (0.36, 0.64, 0.16, 0.16, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
-            (0.16, 0.16, 0.64, 0.36, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
-            (0.16, 0.16, 0.36, 0.64, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
-            (0.16, 0.16, 0.16, 0.16, 0.64, 0.36, 0.04, 0.04, 0.04, 0.01), \
-            (0.16, 0.16, 0.16, 0.16, 0.36, 0.64, 0.04, 0.04, 0.04, 0.01), \
-            (0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.64, 0.16, 0.16, 0.16), \
-            (0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.16, 0.64, 0.16, 0.16), \
-            (0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.16, 0.16, 0.64, 0.16), \
-            (0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.16, 0.16, 0.16, 0.64) ])
+            (0.1089, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900), \
+            (0.0900, 0.1089, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900), \
+            (0.0900, 0.0900, 0.1089, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900), \
+            (0.0900, 0.0900, 0.0900, 0.1089, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900), \
+            (0.0900, 0.0900, 0.0900, 0.0900, 0.1089, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900), \
+            (0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.1089, 0.0900, 0.0900, 0.0900, 0.0900), \
+            (0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.1089, 0.0900, 0.0900, 0.0900), \
+            (0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.1089, 0.0900, 0.0900), \
+            (0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.1089, 0.0900), \
+            (0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.0900, 0.1089) ])
         # L = 300 km
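The new prior-covariance block replaces the old block-structured values (0.64 on the diagonal, 0.36/0.16/0.04/0.01 off-diagonal) with a uniform matrix: 0.1089 (0.33 squared) on the diagonal and 0.0900 everywhere else, i.e. a prior standard deviation of 0.33 per parameter with roughly 0.83 correlation between parameters. A short sketch that builds the same block programmatically; the tiling into the 90x90 fullcov is an assumption for illustration, since that part is not shown in the diff:

```python
# Sketch: rebuild the new 10x10 prior-covariance block from above instead of
# typing it out; the fullcov tiling below is an assumption for illustration.
import numpy as np

rho = 0.0900 / 0.1089                         # implied cross-correlation, ~0.826
partcov = np.full((10, 10), 0.0900)
np.fill_diagonal(partcov, 0.1089)             # 0.33**2 on the diagonal

fullcov = np.zeros((90, 90))
for i in range(9):                            # one block per group of 10 parameters (assumed layout)
    fullcov[i*10:(i+1)*10, i*10:(i+1)*10] = partcov

print(partcov[0, 0], partcov[0, 1], round(rho, 3))   # 0.1089 0.09 0.826
```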
@@ -354,7 +354,7 @@ class ObsPackObservations(Observations):
             #identifier = name_convert(name="%s_%s_%s" % (site.lower(), method.lower(), lab.lower(),), to='GV')
             if identifier in site_info:
-                logging.debug("Observation found (%s, %s)" % (obs.code, identifier))
+                # logging.debug("Observation found (%s, %s)" % (obs.code, identifier))
                 obs.mdm = site_info[identifier]['error'] * self.global_R_scaling
                 obs.may_localize = site_info[identifier]['may_localize']
                 obs.may_reject = site_info[identifier]['may_reject']
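With the per-observation debug line silenced, the site lookup itself is unchanged: the site's error from the locations table, scaled by global_R_scaling, becomes the model-data mismatch (mdm) that feeds the rejection threshold used in the Optimizer above. A hedged sketch of that chain; the site identifier and error value are invented, and treating R as mdm squared is an assumption about the optimizer's bookkeeping:

```python
# Illustrative only: how a site's tabulated error could propagate to the outlier threshold.
import numpy as np

site_info = {'xyz_100magl': {'error': 1.5, 'may_localize': True, 'may_reject': True}}  # invented entry
global_R_scaling = 1.0
rejection_threshold = 3.0

identifier = 'xyz_100magl'
mdm = site_info[identifier]['error'] * global_R_scaling   # model-data mismatch (standard deviation)
R = mdm ** 2                                              # assumed: stored as a variance
print(rejection_threshold * np.sqrt(R))                   # 4.5 -> residual limit for this site
```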
@@ -320,7 +320,7 @@ def sample_step(dacycle, samples, statevector, obsoperator, lag, advance=False):
     # type of info needed in your transport model
     # statevector.write_members_to_file(lag, dacycle['dir.input'])
-    statevector.write_members_for_cosmo(lag, dacycle['dir.input'])
+    statevector.write_members_for_cosmo(advance, lag, dacycle['dir.ct_save'])
     samples.setup(dacycle)
     samples.add_observations()
@@ -285,11 +285,12 @@ class StateVector(object):
         newmember = EnsembleMember(0)
         newmember.param_values = newmean.flatten() # no deviations
         self.ensemble_members[lag].append(newmember)
-        sum_par = np.empty(self.nparams)
+        # sum_par = np.empty(self.nparams)
         # Create members 1:nmembers and add to ensemble_members list
         for member in range(1, self.nmembers):
-            rands = np.random.normal(loc=0.0, scale=0.4, size=self.nparams-1)
+            # rands = np.random.normal(loc=0.0, scale=0.4, size=self.nparams-1)
+            rands = np.random.randn(self.nparams-1)
             rands_bg = np.random.normal(loc=0.0, scale=0.05, size=1) #*1E-4 # x variance(1E-4)
             newmember = EnsembleMember(member)
@@ -304,8 +305,8 @@ class StateVector(object):
             newmember.param_values[newmember.param_values<0.] = 0.
             # newmember.param_values[newmember.param_values>2.] = 2.
             self.ensemble_members[lag].append(newmember)
-            sum_par = sum_par + newmember.param_values
-        self.ensemble_members[lag][0].param_values = sum_par/(self.nmembers-1)
+            # sum_par = sum_par + newmember.param_values
+        # self.ensemble_members[lag][0].param_values = sum_par/(self.nmembers-1)
         # print(np.mean(self.ensemble_members[lag][0].param_values))
         logging.debug('%d new ensemble members were added to the state vector # %d' % (self.nmembers, (lag + 1)))
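Taken together, the two make_new_ensemble hunks switch the member deviations to unit-variance draws (np.random.randn instead of a fixed 0.4 standard deviation) and stop resetting member 0 to the ensemble average, so the mean member keeps the unperturbed prior. A standalone sketch of that scheme, under the assumption (not shown in the diff) that the unit-variance draws are projected through the Cholesky factor of the prior covariance:

```python
# Sketch of the new member generation: unit-variance draws scaled by the prior
# covariance, plus a small background perturbation; member 0 stays at the mean.
# The Cholesky projection and the array sizes are assumptions for illustration.
import numpy as np

nparams, nmembers = 91, 21
newmean = np.ones(nparams)                          # prior scaling factors
cov = np.full((nparams - 1, nparams - 1), 0.0900)   # uniform prior covariance, as above
np.fill_diagonal(cov, 0.1089)
C = np.linalg.cholesky(cov)

members = [newmean.copy()]                          # member 0: mean, no deviations
for m in range(1, nmembers):
    rands = np.random.randn(nparams - 1)            # unit variance, as in the new code
    rands_bg = np.random.normal(loc=0.0, scale=0.05, size=1)
    dev = np.concatenate([C @ rands, rands_bg])
    values = newmean + dev
    values[values < 0.0] = 0.0                      # negative scaling factors clipped to 0
    members.append(values)

spread = np.std([m[0] for m in members[1:]])
print(round(spread, 2))                             # around 0.33 (the prior sigma), up to sampling noise
```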
@@ -433,7 +434,7 @@ class StateVector(object):
         logging.info('Successfully read the State Vector from file (%s) ' % filename)
-    def write_members_for_cosmo(self, lag, outdir,endswith='.nc'):
+    def write_members_for_cosmo(self, advance, lag, outdir,endswith='.nc'):
         members = self.ensemble_members[lag]
         gridmap = self.gridmap.reshape(9,17,189,406)
         self.nparams=int(self.nparams)
@@ -443,7 +444,10 @@ class StateVector(object):
             data = data_mat.flatten()
             # GPP
-            filename_gpp = os.path.join(outdir, 'parameters_gpp_lag'+str(lag)+'.%03d%s' % (mem.membernumber, endswith))
+            if not advance:
+                filename_gpp = os.path.join(outdir, 'parameters_gpp_lag'+str(lag)+'.%03d%s' % (mem.membernumber, endswith))
+            else:
+                filename_gpp = os.path.join(outdir, 'parameters_gpp_lag'+str(lag)+'_advanced.%03d%s' % (mem.membernumber, endswith))
             ncf = io.CT_CDF(filename_gpp, method='create')
             dimparams = ncf.add_params_dim(90)
             dimgrid = ncf.add_latlon_dim()
@@ -484,7 +488,10 @@ class StateVector(object):
             ncf.close()
             # RESP
-            filename_resp = os.path.join(outdir, 'parameters_resp_lag'+str(lag)+'.%03d%s' % (mem.membernumber, endswith))
+            if not advance:
+                filename_resp = os.path.join(outdir, 'parameters_resp_lag'+str(lag)+'.%03d%s' % (mem.membernumber, endswith))
+            else:
+                filename_resp = os.path.join(outdir, 'parameters_resp_lag'+str(lag)+'_advanced.%03d%s' % (mem.membernumber, endswith))
             ncf = io.CT_CDF(filename_resp, method='create')
             dimparams = ncf.add_params_dim(90)
             dimgrid = ncf.add_latlon_dim()
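Finally, write_members_for_cosmo now takes the advance flag from sample_step and writes the member parameter files to dir.ct_save, adding an '_advanced' suffix during the advance step so those files do not overwrite the regular-cycle output. A small standalone sketch of the naming scheme; the helper function and the member number are invented for illustration, and the directory follows dir.ct_save from the rc-file above:

```python
# Hypothetical helper that mirrors the GPP/RESP filename logic in the diff.
import os

def member_filename(outdir, tracer, lag, membernumber, advance, endswith='.nc'):
    suffix = '_advanced' if advance else ''    # advance step gets its own files
    return os.path.join(outdir, 'parameters_%s_lag%d%s.%03d%s' %
                        (tracer, lag, suffix, membernumber, endswith))

outdir = '/store/empa/em05/parsenov/ct_data/real'   # dir.ct_save with run.name = real
print(member_filename(outdir, 'gpp', 1, 5, advance=False))
# .../parameters_gpp_lag1.005.nc
print(member_filename(outdir, 'resp', 1, 5, advance=True))
# .../parameters_resp_lag1_advanced.005.nc
```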