Commit 6b5b4b0f authored by brunner

just before canada

parent 30c6d24d
@@ -80,8 +80,8 @@ class Optimizer(object):
# localization of obs
self.may_localize = np.zeros(self.nobs, bool)
# rejection of obs
#self.may_reject = np.ones(self.nobs, bool)
self.may_reject = np.zeros(self.nobs, bool)
self.may_reject = np.ones(self.nobs, bool)
#self.may_reject = np.zeros(self.nobs, bool)
# flags of obs
self.flags = np.zeros(self.nobs, int)
# species type
@@ -315,6 +315,7 @@ class Optimizer(object):
for n in range(self.nobs):
# Screen for flagged observations (for instance site not found, or no sample written from model)
if self.flags[n] != 0:
logging.debug('Skipping observation (%s,%i) because of flag value %d' % (self.sitecode[n], self.obs_ids[n], self.flags[n]))
continue
@@ -324,7 +325,7 @@ class Optimizer(object):
res = self.obs[n] - self.Hx[n]
if self.may_reject[n]:
print('may reject')
# print('may reject')
threshold = self.rejection_threshold * np.sqrt(self.R[n])
if np.abs(res) > threshold:
logging.debug('Rejecting observation (%s,%i) because residual (%f) exceeds threshold (%f)' % (self.sitecode[n], self.obs_ids[n], res, threshold))
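A minimal sketch (not part of the commit) of the rejection test above, assuming R holds the per-observation error variance and rejection_threshold is a sigma multiple; the default value of 3.0 is an assumption, not taken from this hunk:
# Sketch only; mirrors the residual test above, vectorized over all observations.
import numpy as np

def rejection_mask(obs, Hx, R, may_reject, rejection_threshold=3.0):
    """True where an observation would be rejected: |obs - Hx| > threshold * sqrt(R)."""
    res = obs - Hx                                   # innovations
    threshold = rejection_threshold * np.sqrt(R)     # per-observation threshold
    return may_reject & (np.abs(res) > threshold)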
@@ -346,23 +347,10 @@ class Optimizer(object):
alpha = np.double(1.0) / (np.double(1.0) + np.sqrt((self.R[n]) / self.HPHR[n]))
# if all(self.x[:] + self.KG[:] * res >= 0.):
self.x[:] = self.x + self.KG[:] * res
self.x[self.x<0.] = 0. # cut off negative values, COSMO doesn't like negative fluxes # pavle
for r in range(self.nmembers):
X_prime_temp = self.X_prime[:, r] - alpha * self.KG[:] * (self.HX_prime[n, r])
X_prime_temp[X_prime_temp+self.x < 0.] = 0.
# if all(self.X_prime[:, r] - alpha * self.KG[:] * (self.HX_prime[n, r]) + self.x[:] >= 0.):
self.X_prime[:, r] = X_prime_temp
#self.X_prime[:, r] = self.X_prime[:, r] - alpha * self.KG[:] * (self.HX_prime[n, r])
# continue
# else:
# self.X_prime[:, r] = self.X_prime[:, r] - alpha * self.KG[:] * (self.HX_prime[n, r])
# for p in range(self.nparams):
# if self.X_prime[p, r]+self.x[p]<0.:
# self.X_prime[p, r]=0.
self.X_prime[:, r] = self.X_prime[:, r] - alpha * self.KG[:] * (self.HX_prime[n, r])
#WP !!!! Very important to first do all observations from n=1 through the end, and only then update 1,...,n. The current observation
#WP should always be updated last because it features in the loop of the adjustments !!!!
@@ -379,6 +367,7 @@ class Optimizer(object):
self.Hx[m] = self.Hx[m] + fac * res
self.HX_prime[m, :] = self.HX_prime[m, :] - alpha * fac * self.HX_prime[n, :]
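The loop above is the serial ensemble square-root update for one observation. A condensed sketch follows: KG and HPHR are taken as computed elsewhere in the optimizer, the regression factor fac is assumed to follow the standard CTDAS serial scheme (its computation is not part of this hunk), and the non-negativity clipping corresponds to the X_prime_temp variant shown above.
# Sketch only; names follow the optimizer attributes used above.
import numpy as np

def serial_update(x, X_prime, Hx, HX_prime, KG, HPHR, R, n, res, clip_negative=True):
    """Update mean, deviations and the remaining simulated observations for obs n (in place)."""
    nmembers = X_prime.shape[1]
    alpha = 1.0 / (1.0 + np.sqrt(R[n] / HPHR[n]))
    x += KG * res                                # mean update with the Kalman gain
    if clip_negative:
        x[x < 0.0] = 0.0                         # COSMO cannot handle negative fluxes
    for r in range(nmembers):
        dev = X_prime[:, r] - alpha * KG * HX_prime[n, r]
        if clip_negative:
            dev[dev + x < 0.0] = 0.0             # keep mean + deviation non-negative
        X_prime[:, r] = dev
    for m in range(n + 1, Hx.shape[0]):          # adjust observations not yet assimilated
        fac = np.dot(HX_prime[n, :], HX_prime[m, :]) / ((nmembers - 1) * HPHR[n])  # assumed form
        Hx[m] += fac * res
        HX_prime[m, :] -= alpha * fac * HX_prime[n, :]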
def bulk_minimum_least_squares(self):
""" Make minimum least squares solution by solving matrix equations"""
@@ -30,7 +30,8 @@ import numpy as np
#from da.cosmo.statevector_uniform import StateVector, EnsembleMember
#from da.cosmo.statevector_read_from_output import StateVector, EnsembleMember
#from da.cosmo.statevector_mean import StateVector, EnsembleMember
from da.cosmo.statevector import StateVector, EnsembleMember
#from da.cosmo.statevector import StateVector, EnsembleMember
from da.cosmo.statevector_mean import StateVector, EnsembleMember
import da.tools.io4 as io
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# ct_statevector_tools.py
"""
Author : peters
Revision History:
File created on 28 Jul 2010.
"""
import os
import sys
sys.path.append(os.getcwd())
import logging
import numpy as np
#from da.cosmo.statevector_uniform import StateVector, EnsembleMember
from da.cosmo.statevector_read_from_output import StateVector, EnsembleMember
#from da.cosmo.statevector_mean import StateVector, EnsembleMember
import da.tools.io4 as io
identifier = 'CarbonTracker Statevector '
version = '0.0'
################### Begin Class CO2StateVector ###################
class CO2StateVector(StateVector):
""" This is a StateVector object for CarbonTracker. It has a private method to make new ensemble members """
def get_covariance(self, date, dacycle):
""" Make a new ensemble from specified matrices, the attribute lag refers to the position in the state vector.
Note that lag=1 means an index of 0 in python, hence the notation lag-1 in the indexing below.
The argument is thus referring to the lagged state vector as [1,2,3,4,5,..., nlag]
0. Needleleaf Evergreen, Temperate
1. Needleleaf Evergreen, Boreal
2. Broadleaf Deciduous, Temperate
3. Broadleaf Deciduous, Boreal
4. Broadleaf Deciduous Shrub, Temperate
5. Broadleaf Deciduous Shrub, Boreal
6. C3 Arctic Grass
7. C3 non-Arctic Grass
8. C4 Grass
9. Crop
#10. None (removed)
"""
fullcov = np.zeros(shape=(90,90))
partcov = np.array([ \
(0.64, 0.36, 0.16, 0.16, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
(0.36, 0.64, 0.16, 0.16, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
(0.16, 0.16, 0.64, 0.36, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
(0.16, 0.16, 0.36, 0.64, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01), \
(0.16, 0.16, 0.16, 0.16, 0.64, 0.36, 0.04, 0.04, 0.04, 0.01), \
(0.16, 0.16, 0.16, 0.16, 0.36, 0.64, 0.04, 0.04, 0.04, 0.01), \
(0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.64, 0.16, 0.16, 0.16), \
(0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.16, 0.64, 0.16, 0.16), \
(0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.16, 0.16, 0.64, 0.16), \
(0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.16, 0.16, 0.16, 0.64) ])
L_matrix = np.array([\
(1.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000), \
(0.000, 1.000, 0.715, 0.650, 0.733, 0.614, 0.909, 0.688, 0.544), \
(0.000, 0.715, 1.000, 0.862, 0.751, 0.638, 0.695, 0.501, 0.575), \
(0.000, 0.650, 0.862, 1.000, 0.776, 0.696, 0.651, 0.472, 0.639), \
(0.000, 0.733, 0.751, 0.776, 1.000, 0.827, 0.774, 0.587, 0.732), \
(0.000, 0.614, 0.638, 0.696, 0.827, 1.000, 0.660, 0.537, 0.885), \
(0.000, 0.909, 0.695, 0.651, 0.774, 0.660, 1.000, 0.721, 0.586), \
(0.000, 0.688, 0.501, 0.472, 0.587, 0.537, 0.721, 1.000, 0.489), \
(0.000, 0.544, 0.575, 0.639, 0.732, 0.885, 0.586, 0.489, 1.000) ])
for i in range(9):
for j in range(9):
fullcov[i*10:(i+1)*10,j*10:(j+1)*10] = partcov * L_matrix[i,j]
# correl = np.array([ \
# (0.8, 0.6, 0.4, 0.4, 0.4, 0.4, 0.2, 0.2, 0.2, 0.1), \
# (0.6, 0.8, 0.4, 0.4, 0.4, 0.4, 0.2, 0.2, 0.2, 0.1), \
# (0.4, 0.4, 0.8, 0.6, 0.4, 0.4, 0.2, 0.2, 0.2, 0.1), \
# (0.4, 0.4, 0.6, 0.8, 0.4, 0.4, 0.2, 0.2, 0.2, 0.1), \
# (0.4, 0.4, 0.4, 0.4, 0.8, 0.6, 0.2, 0.2, 0.2, 0.1), \
# (0.4, 0.4, 0.4, 0.4, 0.6, 0.8, 0.2, 0.2, 0.2, 0.1), \
# (0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.8, 0.4, 0.4, 0.4), \
#(0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.8, 0.4, 0.4), \
# (0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.4, 0.8, 0.4), \
# (0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.4, 0.8) ])
# fullcov = np.array([ \
# (1.00, 0.36, 0.16, 0.16, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01, 0.00), \
# (0.36, 1.00, 0.16, 0.16, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01, 0.00), \
# (0.16, 0.16, 1.00, 0.36, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01, 0.00), \
# (0.16, 0.16, 0.36, 1.00, 0.16, 0.16, 0.04, 0.04, 0.04, 0.01, 0.00), \
# (0.16, 0.16, 0.16, 0.16, 1.00, 0.36, 0.04, 0.04, 0.04, 0.01, 0.00), \
# (0.16, 0.16, 0.16, 0.16, 0.36, 1.00, 0.04, 0.04, 0.04, 0.01, 0.00), \
# (0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 1.00, 0.16, 0.16, 0.16, 0.00), \
#(0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.16, 1.00, 0.16, 0.16, 0.00), \
# (0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.16, 0.16, 1.00, 0.16, 0.00), \
# (0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.16, 0.16, 0.16, 1.00, 0.00), \
# # (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.e-10) ])
# (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00) ])
return fullcov
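For clarity: the 90x90 prior covariance above is assembled from a 10x10 PFT covariance block (partcov) scaled block-wise by a 9x9 inter-region correlation matrix (L_matrix), which is exactly a Kronecker product. A small sketch (not part of the commit):
# Sketch only; demonstrates the equivalence of the double loop and np.kron.
import numpy as np

def build_fullcov(partcov, L_matrix):
    """Return the (nreg*npft) x (nreg*npft) covariance assembled block-wise, as in get_covariance."""
    nreg, npft = L_matrix.shape[0], partcov.shape[0]
    fullcov = np.zeros((nreg * npft, nreg * npft))
    for i in range(nreg):
        for j in range(nreg):
            fullcov[i*npft:(i+1)*npft, j*npft:(j+1)*npft] = partcov * L_matrix[i, j]
    assert np.allclose(fullcov, np.kron(L_matrix, partcov))   # same result
    return fullcov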
def read_from_legacy_file(self, filename, qual='opt'):
"""
:param filename: the full filename for the input NetCDF file
:param qual: a string indicating whether to read the 'prior' or 'opt'(imized) StateVector from file
:rtype: None
Read the StateVector information from a NetCDF file and put it in a StateVector object
In principle the input file will have only four datasets inside
called:
* `meanstate_prior`, dimensions [nlag, nparameters]
* `ensemblestate_prior`, dimensions [nlag, nmembers, nparameters]
* `meanstate_opt`, dimensions [nlag, nparameters]
* `ensemblestate_opt`, dimensions [nlag, nmembers, nparameters]
This NetCDF information can be written to file using
:meth:`~da.baseclasses.statevector.StateVector.write_to_file`
"""
f = io.ct_read(filename, 'read')
for n in range(self.nlag):
if qual == 'opt':
meanstate = f.get_variable('statevectormean_opt')
EnsembleMembers = f.get_variable('statevectorensemble_opt')
elif qual == 'prior':
meanstate = f.get_variable('statevectormean_prior')
EnsembleMembers = f.get_variable('statevectorensemble_prior')
if not self.ensemble_members[n] == []:
self.ensemble_members[n] = []
logging.warning('Existing ensemble for lag=%d was removed to make place for newly read data' % (n + 1))
for m in range(self.nmembers):
newmember = EnsembleMember(m)
newmember.param_values = EnsembleMembers[m, :].flatten() + meanstate # add the mean to the deviations to hold the full parameter values
self.ensemble_members[n].append(newmember)
f.close()
logging.info('Successfully read the State Vector from file (%s) ' % filename)
def read_from_file_exceptsam(self, filename, qual='opt'):
"""
:param filename: the full filename for the input NetCDF file
:param qual: a string indicating whether to read the 'prior' or 'opt'(imized) StateVector from file
:rtype: None
Read the StateVector information from a NetCDF file and put it in a StateVector object
In principle the input file will have only four datasets inside
called:
* `meanstate_prior`, dimensions [nlag, nparameters]
* `ensemblestate_prior`, dimensions [nlag, nmembers, nparameters]
* `meanstate_opt`, dimensions [nlag, nparameters]
* `ensemblestate_opt`, dimensions [nlag, nmembers, nparameters]
This NetCDF information can be written to file using
:meth:`~da.baseclasses.statevector.StateVector.write_to_file`
"""
f = io.ct_read(filename, 'read')
meanstate = f.get_variable('statevectormean_' + qual)
# meanstate[:,39:77] = 1
ensmembers = f.get_variable('statevectorensemble_' + qual)
f.close()
for n in range(self.nlag):
if not self.ensemble_members[n] == []:
self.ensemble_members[n] = []
logging.warning('Existing ensemble for lag=%d was removed to make place for newly read data' % (n + 1))
for m in range(self.nmembers):
newmember = EnsembleMember(m)
newmember.param_values = ensmembers[n, m, :].flatten() + meanstate[n] # add the mean to the deviations to hold the full parameter values
self.ensemble_members[n].append(newmember)
logging.info('Successfully read the State Vector from file (%s) ' % filename)
# logging.info('State Vector set to 1 for South American regions')
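Both readers follow the same pattern: load the mean state and the ensemble deviations for the requested quality ('prior' or 'opt') and rebuild each member as mean + deviation. A compact sketch using netCDF4 directly (the variable names are the ones used above; the rest of the file layout is assumed):
# Sketch only; not part of the commit.
from netCDF4 import Dataset
import numpy as np

def read_full_members(filename, nmembers, qual='opt'):
    """Return an (nlag, nmembers, nparams) array of full parameter values (mean + deviation)."""
    with Dataset(filename, 'r') as nc:
        meanstate = np.asarray(nc.variables['statevectormean_' + qual][:])       # (nlag, nparams)
        deviations = np.asarray(nc.variables['statevectorensemble_' + qual][:])  # (nlag, nmembers, nparams)
    return deviations[:, :nmembers, :] + meanstate[:, None, :]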
################### End Class CO2StateVector ###################
if __name__ == "__main__":
pass
@@ -103,8 +103,7 @@ class ObservationOperator(object):
f_in.close()
shape = (self.forecast_nmembers,mdm.size)
model_data=np.empty(shape=shape) # 3x7
model_data = np.empty(shape=(self.forecast_nmembers,mdm.size)) # 3x7
self.lambda_file = os.path.join(self.outputdir, 'lambda.%s.nc' % self.dacycle['time.sample.stamp'])
ofile = Dataset(self.lambda_file, mode='w')
@@ -116,6 +115,7 @@ class ObservationOperator(object):
for m in range(0,self.forecast_nmembers):
co2[m,:] = members[m].param_values
# co2[co2<0] = 0.
l[:] = co2
ofile.close()
os.system('cp '+self.lambda_file+' '+dacycle['da.vprm']+'/lambdas.nc')
@@ -128,7 +128,7 @@ class ObservationOperator(object):
else:
os.rename(dacycle['dir.da_run']+"/"+absolute_start_time+"_"+str(starth+lag*168)+"_"+str(endh+lag*168), dacycle['dir.da_run']+"/non_opt_"+dacycle['time.start'].strftime('%Y%m%d%H')+"_"+str(starth+lag*168)+"_"+str(endh+lag*168))
os.system('python run_chain.py '+self.dacycle['run.name']+' '+absolute_start_time_ch+' '+str(starth+lag*168)+' '+str(endh+lag*168)+' -j meteo icbc emissions int2lm post_int2lm octe online_vprm cosmo')
os.system('python run_chain.py '+self.dacycle['run.name']+' '+absolute_start_time_ch+' '+str(starth+lag*168)+' '+str(endh+lag*168)+' -j meteo icbc int2lm post_int2lm oae octe online_vprm cosmo')
logging.info('COSMO done!')
os.chdir(dacycle['dir.da_run'])
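The COSMO run itself is delegated to run_chain.py; the command string above adds the 'oae' job and drops 'emissions' from the job list. A sketch of the same call via subprocess (arguments copied from the command above; run_chain.py's CLI is otherwise assumed):
# Sketch only; equivalent to the os.system call above, without shell string building.
import subprocess

def launch_chain(run_name, start_date, hstart, hstop):
    jobs = ['meteo', 'icbc', 'int2lm', 'post_int2lm', 'oae', 'octe', 'online_vprm', 'cosmo']
    cmd = ['python', 'run_chain.py', run_name, start_date, str(hstart), str(hstop), '-j'] + jobs
    subprocess.check_call(cmd)                    # raises CalledProcessError on failure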
@@ -191,10 +191,10 @@ class ObservationOperator(object):
co2_out_lhw = cosmo_out+'CO2_lhw_'+ens+'_'+dt+'.nc'
co2_out_brm = cosmo_out+'CO2_brm_'+ens+'_'+dt+'.nc'
co2_out_ssl = cosmo_out+'CO2_ssl_'+ens+'_'+dt+'.nc'
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A_CH+CO2_A)/(1.-QV)'", input = "-remapnn,lon=7.99_lat=46.54 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A_CH,CO2_A "+co2_in_fn, output = co2_out_jfj)
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A_CH+CO2_A)/(1.-QV)'", input = "-remapnn,lon=8.40_lat=47.48 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A_CH,CO2_A "+co2_in_fn, output = co2_out_lhw)
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A_CH+CO2_A)/(1.-QV)'", input = "-remapnn,lon=8.18_lat=47.19 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A_CH,CO2_A "+co2_in_fn, output = co2_out_brm)
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A_CH+CO2_A)/(1.-QV)'", input = "-remapnn,lon=7.92_lat=47.92 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A_CH,CO2_A "+co2_in_fn, output = co2_out_ssl)
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A)/(1.-QV)'", input = "-remapnn,lon=7.99_lat=46.54 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A "+co2_in_fn, output = co2_out_jfj)
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A)/(1.-QV)'", input = "-remapnn,lon=8.40_lat=47.48 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A "+co2_in_fn, output = co2_out_lhw)
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A)/(1.-QV)'", input = "-remapnn,lon=8.18_lat=47.19 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A "+co2_in_fn, output = co2_out_brm)
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A)/(1.-QV)'", input = "-remapnn,lon=7.92_lat=47.92 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A "+co2_in_fn, output = co2_out_ssl)
files2cat_jfj.append(co2_out_jfj)
files2cat_lhw.append(co2_out_lhw)
files2cat_brm.append(co2_out_brm)
#!/usr/bin/env python
# model.py
import logging
import os
import sys
import subprocess
import da.cosmo.io4 as io
import numpy as np
from netCDF4 import Dataset
from datetime import datetime, timedelta
from dateutil import rrule
from cdo import *
from . import site_height
from da.cosmo.icbc4ctdas import ct
from itertools import repeat
from multiprocessing import Pool
from da.tools.general import to_datetime
identifier = 'ObservationOperator'
version = '10'
cdo = Cdo()
################### Begin Class ObservationOperator ###################
class ObservationOperator(object):
def __init__(self, dacycle=None):
self.ID = identifier
self.version = version
self.restart_filelist = []
self.output_filelist = []
self.outputdir = None # Needed for opening the samples.nc files created
logging.info('Observation Operator object initialized: %s' % self.ID)
if dacycle != None:
self.dacycle = dacycle
else:
self.dacycle = {}
def get_initial_data(self):
""" This method places all initial data needed by an ObservationOperator in the proper folder for the model """
def setup(self,dacycle):
""" Perform all steps necessary to start the observation operator through a simple Run() call """
self.dacycle = dacycle
self.outputdir = dacycle['dir.output']
def prepare_run(self):
""" Prepare the running of the actual forecast model, for example compile code """
# Define the name of the file that will contain the modeled output of each observation
self.simulated_file = os.path.join(self.outputdir, 'samples_simulated.%s.nc' % self.dacycle['time.sample.stamp'])
self.forecast_nmembers = int(self.dacycle['da.optimizer.nmembers'])
def run(self,lag,dacycle,statevector,advance=False):
members = statevector.ensemble_members[lag]
self.forecast_nmembers = int(self.dacycle['da.optimizer.nmembers'])
self.nparams = int(self.dacycle['nparameters'])
absolute_start_time = str((to_datetime(dacycle['abs.time.start'])).strftime('%Y%m%d%H'))
absolute_start_time_ch = str((to_datetime(dacycle['abs.time.start'])).strftime('%Y-%m-%d'))
starth = abs((to_datetime(dacycle['abs.time.start'])-dacycle['time.start']).days)*24
endh = abs((to_datetime(dacycle['abs.time.start'])-dacycle['time.finish']).days)*24
f = io.CT_CDF(self.simulated_file, method='create')
logging.debug('Creating new simulated observation file in ObservationOperator (%s)' % self.simulated_file)
dimid = f.createDimension('obs_num', size=None)
dimid = ('obs_num',)
savedict = io.std_savedict.copy()
savedict['name'] = "obs_num"
savedict['dtype'] = "int"
savedict['long_name'] = "Unique_Dataset_observation_index_number"
savedict['units'] = ""
savedict['dims'] = dimid
savedict['comment'] = "Unique index number within this dataset ranging from 0 to UNLIMITED."
f.add_data(savedict,nsets=0)
dimmember = f.createDimension('nmembers', size=self.forecast_nmembers)
dimmember = ('nmembers',)
savedict = io.std_savedict.copy()
savedict['name'] = "flask"
savedict['dtype'] = "float"
savedict['long_name'] = "mole_fraction_of_trace_gas_in_dry_air"
savedict['units'] = "ppm"
savedict['dims'] = dimid + dimmember
savedict['comment'] = "Simulated model value created by COSMO"
f.add_data(savedict,nsets=0)
# Open file with x,y,z,t of model samples that need to be sampled
f_in = io.ct_read(self.dacycle['ObsOperator.inputfile'],method='read')
# Get simulated values and ID
ids = f_in.get_variable('obs_num')
obs = f_in.get_variable('observed')
mdm = f_in.get_variable('modeldatamismatch')
f_in.close()
shape = (self.forecast_nmembers,mdm.size)
model_data=np.empty(shape=shape) # 3x7
self.lambda_file = os.path.join(self.outputdir, 'lambda.%s.nc' % self.dacycle['time.sample.stamp'])
ofile = Dataset(self.lambda_file, mode='w')
opar = ofile.createDimension('nparam', self.nparams)
omem = ofile.createDimension('nensembles', self.forecast_nmembers)#len(members.nmembers))
l = ofile.createVariable('lambda', np.float32, ('nensembles','nparam'),fill_value=-999.99)
co2 = np.empty(shape=(self.forecast_nmembers,self.nparams))
for m in range(0,self.forecast_nmembers):
co2[m,:] = members[m].param_values
l[:] = co2
ofile.close()
os.system('cp '+self.lambda_file+' '+dacycle['da.vprm']+'/lambdas.nc')
# os.chdir(dacycle['dir.da_run'])
#
# args = [
# (dacycle, starth+168*lag, endh+168*lag-1, n)
# for n in range(1,self.forecast_nmembers+1)
# ]
#
# with Pool(self.forecast_nmembers) as pool:
# pool.starmap(self.extract_model_data, args)
#
for i in range(0,self.forecast_nmembers):
idx = str(i+1).zfill(3)
cosmo_file = os.path.join('/store/empa/em05/parsenov/cosmo_data/real/model_'+idx+'_%s.nc' % dacycle['time.sample.stamp'])
ifile = Dataset(cosmo_file, mode='r')
model_data[i,:] = (np.squeeze(ifile.variables['CO2'][:])*29./44.01)*1E6 # in ppm
ifile.close()
for j,data in enumerate(zip(ids,obs,mdm)):
f.variables['obs_num'][j] = data[0]
f.variables['flask'][j,:] = model_data[:,j]
f.close()
logging.info('ObservationOperator finished successfully, output file written (%s)' % self.simulated_file)
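The only non-obvious step in run() is the unit conversion when reading the extracted COSMO profiles: the 'CO2' field is assumed to be a mass mixing ratio (kg CO2 per kg dry air, after the division by 1-QV in the cdo expression), which is turned into a dry-air mole fraction in ppm using the molar masses of air and CO2. A sketch with a worked number:
# Sketch only; constants as used in the conversion above.
M_AIR = 29.0      # g/mol, approximate molar mass of dry air
M_CO2 = 44.01     # g/mol, molar mass of CO2

def mass_mixing_ratio_to_ppm(q_co2):
    """kg/kg (dry air) -> micromol/mol (ppm)."""
    return q_co2 * (M_AIR / M_CO2) * 1.0e6

# example: 6.1e-4 kg/kg  ->  about 402 ppm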
def run_forecast_model(self, lag, dacycle, statevector, advance):
self.prepare_run()
self.run(lag, dacycle, statevector, advance)
def extract_model_data(self, dacycle, hstart, hstop, ensnum):
self.dacycle = dacycle
time_stamp = dacycle['time.sample.stamp']
abs_start_time = str((to_datetime(dacycle['abs.time.start'])).strftime('%Y%m%d%H'))
cosmo_out = dacycle['dir.da_run']+"/"+abs_start_time+"_"+str(hstart)+"_"+str(hstop+1)+"/cosmo/output/"
hhl_cosmo_out = dacycle['dir.da_run']+"/"+abs_start_time+"_0_168/cosmo/output/"
cosmo_save = "/store/empa/em05/parsenov/cosmo_data/"
hhl_fn = hhl_cosmo_out+'lffd'+abs_start_time+'c.nc'
ens = str(ensnum).zfill(3)
files2cat_jfj=[]
files2cat_lhw=[]
files2cat_brm=[]
files2cat_ssl=[]
if ens == "001":
cdo.selname("HHL", input = hhl_fn, output = cosmo_out+"hhl.nc")
cdo.remapnn("lon=7.99_lat=46.54,", input = cosmo_out+"hhl.nc", output = cosmo_out+"hhl_jfj.nc")
cdo.remapnn("lon=8.40_lat=47.48,", input = cosmo_out+"hhl.nc", output = cosmo_out+"hhl_lhw.nc")
cdo.remapnn("lon=8.18_lat=47.19,", input = cosmo_out+"hhl.nc", output = cosmo_out+"hhl_brm.nc")
cdo.remapnn("lon=7.92_lat=47.92,", input = cosmo_out+"hhl.nc", output = cosmo_out+"hhl_ssl.nc")
for dt in rrule.rrule(rrule.HOURLY, dtstart=to_datetime(dacycle['abs.time.start'])+timedelta(hours=hstart), until=to_datetime(dacycle['abs.time.start'])+timedelta(hours=hstop)):
dt=dt.strftime('%Y%m%d%H')
if ens == "001":
logging.info('Extracting output for time %s' % (str(dt)))
co2_in_fn = cosmo_out+'lffd'+dt+'.nc'
co2_out_jfj = cosmo_out+'CO2_jfj_'+ens+'_'+dt+'.nc'
co2_out_lhw = cosmo_out+'CO2_lhw_'+ens+'_'+dt+'.nc'
co2_out_brm = cosmo_out+'CO2_brm_'+ens+'_'+dt+'.nc'
co2_out_ssl = cosmo_out+'CO2_ssl_'+ens+'_'+dt+'.nc'
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A_CH+CO2_A)/(1.-QV)'", input = "-remapnn,lon=7.99_lat=46.54 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A_CH,CO2_A "+co2_in_fn, output = co2_out_jfj)
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A_CH+CO2_A)/(1.-QV)'", input = "-remapnn,lon=8.40_lat=47.48 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A_CH,CO2_A "+co2_in_fn, output = co2_out_lhw)
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A_CH+CO2_A)/(1.-QV)'", input = "-remapnn,lon=8.18_lat=47.19 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A_CH,CO2_A "+co2_in_fn, output = co2_out_brm)
cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A_CH+CO2_A)/(1.-QV)'", input = "-remapnn,lon=7.92_lat=47.92 -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A_CH,CO2_A "+co2_in_fn, output = co2_out_ssl)
files2cat_jfj.append(co2_out_jfj)
files2cat_lhw.append(co2_out_lhw)
files2cat_brm.append(co2_out_brm)
files2cat_ssl.append(co2_out_ssl)
cdo.cat(input = files2cat_jfj, output = cosmo_out+"CO2_jfj_"+ens+"_"+time_stamp+".nc")
cdo.cat(input = files2cat_lhw, output = cosmo_out+"CO2_lhw_"+ens+"_"+time_stamp+".nc")
cdo.cat(input = files2cat_brm, output = cosmo_out+"CO2_brm_"+ens+"_"+time_stamp+".nc")
cdo.cat(input = files2cat_ssl, output = cosmo_out+"CO2_ssl_"+ens+"_"+time_stamp+".nc")
sites = ("lhw","brm","jfj","ssl")
for s,ss in enumerate(sites):
site_height.main(cosmo_out, str(ens), ss, time_stamp)
cdo.intlevel("860", input = cosmo_out+"CO2_60lev_"+ens+"_lhw_"+time_stamp+".nc", output = cosmo_out+"modelled_"+ens+"_lhw_"+time_stamp+".nc")
cdo.intlevel("1009", input = cosmo_out+"CO2_60lev_"+ens+"_brm_"+time_stamp+".nc", output = cosmo_out+"modelled_"+ens+"_brm_"+time_stamp+".nc")
cdo.intlevel("3580", input = cosmo_out+"CO2_60lev_"+ens+"_jfj_"+time_stamp+".nc", output = cosmo_out+"modelled_"+ens+"_jfj_"+time_stamp+".nc")
cdo.intlevel("1205", input = cosmo_out+"CO2_60lev_"+ens+"_ssl_"+time_stamp+".nc", output = cosmo_out+"modelled_"+ens+"_ssl_"+time_stamp+".nc")
cdo.cat(input = cosmo_out+"modelled_"+ens+"_brm_"+time_stamp+".nc "+cosmo_out+"modelled_"+ens+"_jfj_"+time_stamp+".nc "+cosmo_out+"modelled_"+ens+"_lhw_"+time_stamp+".nc "+cosmo_out+"modelled_"+ens+"_ssl_"+time_stamp+".nc ", output = cosmo_save+"model_"+ens+"_"+time_stamp+".nc")
logging.info('Extracting done for ens %s' % (ens))
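Per station and member, the extraction above boils down to: nearest-neighbour remap of the tracers to the site coordinates, combination into one dry-air CO2 field, concatenation of the hourly files, vertical regridding (site_height.main) and sampling at the inlet height with cdo.intlevel. A condensed sketch of the first three steps for a single station (file naming is illustrative; the site_height step is left out):
# Sketch only; uses the same cdo bindings as above.
from cdo import Cdo
cdo = Cdo()

def extract_station(hourly_files, ens, lon, lat, out_fn):
    """Build total CO2 at one station for one member and concatenate the hourly files."""
    per_hour = []
    for fn in hourly_files:
        out = fn.replace('.nc', '_' + ens + '_station.nc')
        cdo.expr("'CO2=(CO2_BG"+ens+"-CO2_GPP"+ens+"+CO2_RA"+ens+"+CO2_A)/(1.-QV)'",
                 input="-remapnn,lon="+lon+"_lat="+lat+" -selname,QV,CO2_BG"+ens+",CO2_GPP"+ens+",CO2_RA"+ens+",CO2_A "+fn,
                 output=out)
        per_hour.append(out)
    cdo.cat(input=per_hour, output=out_fn)   # hourly profiles -> one time series per station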
################### End Class ObservationOperator ###################
class RandomizerObservationOperator(ObservationOperator):
""" This class holds methods and variables that are needed to use a random number generated as substitute
for a true observation operator. It takes observations and returns values for each obs, with a specified
amount of white noise added
"""
if __name__ == "__main__":
pass
@@ -348,8 +348,8 @@ class ObsPackObservations(Observations):
for obs in self.datalist: # first loop over all available data points to set flags correctly
# obs.mdm = 0.05 # default is very high model-data-mismatch, until explicitly set by script
obs.mdm = 2.5 # default is very high model-data-mismatch, until explicitly set by script
# obs.mdm = 2.5 # default is very high model-data-mismatch, until explicitly set by script
obs.mdm = 1 # default is very high model-data-mismatch, until explicitly set by script
obs.flag = 0
identifier = obs.code
# species, site, method, lab, datasetnr = identifier.split('_')
@@ -258,6 +258,7 @@ def prepare_state(dacycle, statevector):
saved_sv = os.path.join(dacycle['dir.restart'], 'savestate_%s.nc' % dacycle['da.restart.tstamp'].strftime('%Y%m%d'))
statevector.read_from_file(saved_sv) # by default will read "opt"(imized) variables, and then propagate
statevector.remove_negs(saved_sv)
# Now propagate the ensemble by one cycle to prepare for the current cycle
statevector.propagate(dacycle)
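remove_negs is called here but its implementation is not part of this commit. A hypothetical sketch, consistent with the non-negativity clipping introduced in the optimizer, would clamp negative full parameter values of each ensemble member to zero after reading the optimized state (the real method also takes the savestate filename, which this sketch ignores):
# Hypothetical sketch only; not the actual CTDAS implementation.
def remove_negs_sketch(statevector):
    for members in statevector.ensemble_members:        # one list of members per lag
        for member in members:
            member.param_values[member.param_values < 0.0] = 0.0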
@@ -292,8 +292,7 @@ class StateVector(object):
# rands_bg = np.random.uniform(low=-0.05, high=0.05, size=1)
# rands = np.random.randn(self.nparams-1)
# rands_bg = np.random.randn(1)*1E-4 # x variance(1E-4)
# rands = np.random.normal(loc=0.0, scale=0.4, size=self.nparams-1)
rands = np.random.normal(loc=0.0, scale=1000, size=self.nparams-1)
rands = np.random.normal(loc=0.0, scale=0.25, size=self.nparams-1)
#rands = np.random.normal(loc=0.0, scale=0.7, size=self.nparams-1)
rands_bg = np.random.normal(loc=0.0, scale=0.05, size=1) #*1E-4 # x variance(1E-4)
# rands_bg = np.random.normal(loc=0.0, scale=1E-4, size=1) #*1E-4 # x variance(1E-4)
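The change above reduces the spread of the prior parameter perturbations from scale=1000 to scale=0.25 (the standard deviation of the Gaussian scaling-factor deviations), while the background parameter keeps scale=0.05. A sketch of how one member's deviation vector could be assembled from these two draws (the surrounding member-creation code is not shown in this hunk, so the ordering is an assumption):
# Sketch only; assumes the background parameter is the last element of the state.
import numpy as np

def draw_member_deviation(nparams, scale_params=0.25, scale_bg=0.05):
    rands = np.random.normal(loc=0.0, scale=scale_params, size=nparams - 1)
    rands_bg = np.random.normal(loc=0.0, scale=scale_bg, size=1)
    return np.concatenate([rands, rands_bg])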
@@ -307,7 +306,7 @@ class StateVector(object):