NearRealTimeCTDAS / CTDAS
Commit f7a938f2 authored Feb 21, 2020 by Woude, Auke van der
Added biosphere, multiple countries and biosphere
Parent: c0706f73
Changes: 5 files
da/ffdas/emissionmodel.py (diff collapsed in the web view)
da/ffdas/obs.py
...
...
@@ -14,12 +14,14 @@ import sys
import logging
import datetime as dtm
-from string import strip
from numpy import array, logical_and
import numpy as np
sys.path.append(os.getcwd())
sys.path.append('../../')
-from pylab import *
+#from pylab import *
+from numpy import *
+from scipy import *
+from matplotlib.pylab import *

identifier = 'Rotterdam CO2 mole fractions'
version = '0.0'
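Note: if the 'from string import strip' import was indeed dropped here, that is consistent with Python 3, where the string module no longer provides strip(); the same behaviour comes from the str.strip method. A one-line illustration, not part of the commit (sample values invented):

line = '  RDAM , rotterdam_tower \n'
fields = [field.strip() for field in line.split(',')]    # ['RDAM', 'rotterdam_tower']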
...
...
@@ -29,7 +31,7 @@ import da.tools.io4 as io
import da.tools.rc as rc

################### Begin Class RdamObservations ###################

-class RdamObservations(Observations):
+class RINGOObservations(Observations):
    """ an object that holds data + methods and attributes needed to manipulate mole fraction values """

    def setup(self, dacycle):
...
...
@@ -62,28 +64,32 @@ class RdamObservations(Observations):
        """
        infile = os.path.join(self.obspack_dir, self.obspack_id)
        logging.debug('infile = {}'.format(infile))
        f = open(infile, 'r')
        lines = f.readlines()
        f.close()

-        ncfilelist = []
-        for line in lines:
-            if line.startswith('#'): continue # header
-            dum = line.split(",")
-            ncfile = [dum[1]]
-            if not ncfile[0] in ncfilelist:
-                ncfilelist.extend(ncfile)
+        ncfilelist = ['obsfiles/' + line.split(',')[1] for line in lines if not line[0] == '#']
+        ncfilelist = ncfilelist[:int(dacycle.dasystem['obs.input.nr'])]
+#        ncfilelist = []
+#        for line in lines:
+#            if line.startswith('#'): continue # header
+#
+#            dum = line.split(",")
+#            ncfile = [dum[1]]
+#
+#            if not ncfile[0] in ncfilelist:
+#                ncfilelist.extend(ncfile)

        for ncfile in ncfilelist:
            infile = os.path.join(self.obspack_dir, ncfile + '.nc')
            ncf = io.ct_read(infile, 'read')
            idates = ncf.get_variable('Times')
-            idates = [b''.join(d) for d in idates]
-            dates = array([dtm.datetime.strptime(d.decode(), '%Y-%m-%d_%H:%M:%S') for d in idates])
+            idates = [''.join(d) for d in idates]
+            dates = array([dtm.datetime.strptime(d, '%Y-%m-%d_%H:%M:%S') for d in idates])
-            subselect = logical_and(dates >= self.startdate, dates <= self.enddate).nonzero()[0]
+            subselect = logical_and(dates >= self.startdate, dates < self.enddate).nonzero()[0]
            dates = dates.take(subselect, axis=0)

            datasetname = ncfile  # use full name of dataset to propagate for clarity
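Two behavioural changes sit in this hunk besides the shorter file-list construction: the per-character Times values are joined as plain strings instead of bytes (so no .decode() before strptime), and the time window becomes half-open (dates < self.enddate instead of <=), so a sample falling exactly on the end date is now excluded. Note also that the old loop deduplicated file names, while the new list comprehension does not. The sketch below is a self-contained illustration of both pieces with invented file names and timestamps; in the real code the inputs come from obsfiles.csv and the NetCDF Times variable, and the timestamp handling shown here uses the old bytes-style joining:

import datetime as dtm
from numpy import array, logical_and

# Invented stand-in for the obsfiles.csv lines (site code, file stem, comment)
lines = ['# site,file,comment\n',
         'RDAM,rotterdam_tower,hourly\n',
         'CABW,cabauw_tower,hourly\n']

# New-style list building: skip header lines, keep column 1, prefix, truncate to obs.input.nr (here 2)
ncfilelist = ['obsfiles/' + line.split(',')[1] for line in lines if not line[0] == '#']
ncfilelist = ncfilelist[:2]

# Invented stand-in for ncf.get_variable('Times'): one byte character per element
idates = [[bytes([c]) for c in s] for s in (b'2019-01-01_00:00:00',
                                            b'2019-01-01_12:00:00',
                                            b'2019-01-02_00:00:00')]
dates = array([dtm.datetime.strptime(b''.join(d).decode(), '%Y-%m-%d_%H:%M:%S') for d in idates])

# Half-open window as in the new branch: keeps the first two timestamps only
startdate, enddate = dtm.datetime(2019, 1, 1), dtm.datetime(2019, 1, 2)
subselect = logical_and(dates >= startdate, dates < enddate).nonzero()[0]
dates = dates.take(subselect, axis=0)
print(ncfilelist, dates)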
...
...
@@ -120,7 +126,7 @@ class RdamObservations(Observations):
        logging.info("Successfully read data from model sample file (%s)" % filename)

        obs_ids = self.getvalues('id').tolist()
-        ids = list(map(int, ids))
+        obs_ids = list(map(int, obs_ids))

        missing_samples = []
...
...
@@ -135,7 +141,7 @@ class RdamObservations(Observations):
            logging.warning('Model samples were found that did not match any ID in the observation list. Skipping them...')

        logging.debug("Added %d simulated values to the Data list" % (len(ids) - len(missing_samples)))
        self.simulated_concs = simulated

    def write_sample_coords(self, obsinputfile):
        """
...
...
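The two hunks above cast both id lists to integers before they are compared and store the simulated concentrations on the Observations object as self.simulated_concs. The actual matching loop is not visible in this diff; the snippet below is only a rough, hypothetical reconstruction of that bookkeeping with invented id values:

import logging

ids = list(map(int, ['1001', '1002', '1005']))               # ids read from the model sample file
obs_ids = list(map(int, ['1001', '1002', '1003', '1004']))   # ids held by the Observations object

missing_samples = [i for i in ids if i not in obs_ids]       # assumed matching rule, not shown in the diff
if missing_samples:
    logging.warning('Model samples were found that did not match any ID in the observation list. Skipping them...')
logging.debug("Added %d simulated values to the Data list" % (len(ids) - len(missing_samples)))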
@@ -323,7 +329,6 @@ class RdamObservations(Observations):
            identifier = obs.code
            # species, site, method, lab, datasetnr = identifier.split('_')
            if identifier in site_info:
                if identifier in site_hourly:
                    obs.samplingstrategy = 2
...
...
da/ffdas/observationoperator.py (diff collapsed in the web view)
da/ffdas/pipeline.py
...
...
@@ -128,9 +128,8 @@ def forward_pipeline(dacycle, platform, dasystem, samples, statevector, obsopera
def analysis_pipeline(dacycle, platform, dasystem, samples, statevector):
    """ Main entry point for analysis of ctdas results """

-    from da.analysis.expand_fluxes import save_weekly_avg_1x1_data, save_weekly_avg_state_data, save_weekly_avg_tc_data, save_weekly_avg_ext_tc_data, save_weekly_avg_agg_data
-    from da.analysis.expand_molefractions import write_mole_fractions
-    from da.analysis.summarize_obs import summarize_obs
+    from da.analysis.expand_fluxes_ffdas import save_weekly_avg_1x1_data, save_weekly_avg_state_data, save_weekly_avg_tc_data, save_weekly_avg_ext_tc_data, save_weekly_avg_agg_data
+    from da.analysis.expand_molefractions_ffdas import write_mole_fractions
    from da.analysis.time_avg_fluxes import time_avg

    logging.info(header + "Starting analysis" + footer)
...
...
@@ -158,16 +157,16 @@ def analysis_pipeline(dacycle, platform, dasystem, samples, statevector):
#    save_weekly_avg_agg_data(dacycle,region_aggregate='olson_extended')
#    save_weekly_avg_agg_data(dacycle,region_aggregate='country')

-    logging.info(header + "Starting monthly and yearly averages" + footer)
+#    logging.info(header + "Starting monthly and yearly averages" + footer)

-    time_avg(dacycle, 'flux1x1')
+#    time_avg(dacycle,'flux1x1')
#    time_avg(dacycle,'transcom')
#    time_avg(dacycle,'transcom_extended')
#    time_avg(dacycle,'olson')
#    time_avg(dacycle,'olson_extended')
#    time_avg(dacycle,'country')

-    logging.info(header + "Finished analysis" + footer)
+#    logging.info(header + "Finished analysis" + footer)

def archive_pipeline(dacycle, platform, dasystem):
    """ Main entry point for archiving of output from one disk/system to another """
...
...
@@ -229,21 +228,21 @@ def prepare_state(dacycle, dasystem, statevector,samples, obsoperator, emismodel
    logging.info(header + "starting prepare_state" + footer)

    if not dacycle['time.restart']:
        # Fill each week from n=1 to n=nlag with a new ensemble
-        for n in range(statevector.nlag):
-            date = dacycle['time.start'] + datetime.timedelta(days=(n + 0.5) * int(dacycle['time.cycle']))
+        for lag in range(statevector.nlag):
+            date = dacycle['time.start'] + datetime.timedelta(days=(lag + 0.5) * int(dacycle['time.cycle']))
            cov = statevector.get_covariance(date, dacycle)
-            statevector.make_new_ensemble(dacycle, n, cov)
+            statevector.make_new_ensemble(lag, cov)

        # Prepare emissions (spatial and temporal distribution) if emisflag is set to 1
        if dacycle.dasystem['run.emisflag'] == '1':
-            emismodel.get_emis(dacycle, psdo=1)
+            emismodel.get_emis(dacycle, samples, do_pseudo=1)

        # Prepare new pseudo-observations if obsflag is set to 1
        if dacycle.dasystem['run.obsflag'] == '1':
-            obsoperator.run_forecast_model(dacycle, psdo=1, adv=0)
+            obsoperator.run_forecast_model(dacycle, do_pseudo=1, adv=0)

    else:
        # Read the statevector data from file
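In the rewritten loop the index is called lag and make_new_ensemble no longer receives dacycle. The date handed to get_covariance is the midpoint of each lagged cycle: the run start plus (lag + 0.5) cycle lengths in days. A small worked example of that arithmetic with an invented weekly cycle:

import datetime

time_start = datetime.datetime(2019, 1, 1)   # stand-in for dacycle['time.start']
time_cycle = '7'                             # stand-in for dacycle['time.cycle'], days as a string
nlag = 3                                     # stand-in for statevector.nlag

for lag in range(nlag):
    date = time_start + datetime.timedelta(days=(lag + 0.5) * int(time_cycle))
    print(lag, date)
# 0 2019-01-04 12:00:00
# 1 2019-01-11 12:00:00
# 2 2019-01-18 12:00:00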
...
...
@@ -325,12 +324,11 @@ def sample_step(dacycle, samples, statevector, obsoperator, emismodel, lag, adva
    # Run the observation operator, store data in different file for advance
    if dacycle.dasystem['run.emisflagens'] == '1':
-        emismodel.get_emis(dacycle, psdo=0)
+        emismodel.get_emis(dacycle, samples, do_pseudo=0)
    if not advance:
-        obsoperator.run_forecast_model(dacycle, psdo=0, adv=0)
+        obsoperator.run_forecast_model(dacycle, do_pseudo=0, adv=0)
    else:
-        obsoperator.run_forecast_model(dacycle, psdo=0, adv=1)
+        obsoperator.run_forecast_model(dacycle, do_pseudo=0, adv=1)

    # Read forecast model samples that were written to NetCDF files by each member. Add them to the existing
    # Observation object for each sample loop. This data will be written to file in the output folder for each sample cycle.
...
...
@@ -342,7 +340,6 @@ def sample_step(dacycle, samples, statevector, obsoperator, emismodel, lag, adva
    if os.path.exists(sampling_coords_file):
        samples.add_simulations(obsoperator.simulated_file)
    else:
        logging.warning("No simulations added, because input file does not exist (no samples found in obspack)")

    # Now write a small file that holds for each observation a number of values that we need in postprocessing
    #filename = samples.write_sample_coords()
...
...
da/ffdas/stilt-ops_urbanall.rc
!!! Info for the CarbonTracker data assimilation system

-datadir : /home/awoude/ffdas/test/Data
+datadir : /home/awoude/ffdas/RINGO/Data

! list of all observation sites
obs.input.id : obsfiles.csv

! number of observation sites included; number of species included and to be used in inversion
-obs.input.nr : 7
-obs.spec.nr : 4
+obs.input.nr : 2
+obs.spec.nr : 1
obs.spec.name : co2

! number of emission categories defined in the emission model
obs.cat.nr : 14

! For Rdam obs
obs.sites.rc : ${datadir}/sites_weights.rc
obs.sites.times : 12; 16

! number of parameters
-nparameters : 44
+nparameters : 50

! set fixed seed for random number generator, or use 0 if you want to use any random seed
random.seed : 4385

!file with prior estimate of scaling factors (statevector) and covariances
emis.pparam : param_values.nc
-ff.covariance : covariances.nc
+ff.covariance : covariances2.nc

!file with emission model parameter values
-emis.paramfile : emis_parameters.csv
+emis.paramfiles : emission_factors
+emis.ncountries : 10
+countries : AUS; BEL; CZE; FRA; DEU; LUX; NED; POL; CHE; GBR

! switch (1=on/0=off) and input data for background CO2 and CO concentrations
obs.bgswitch : 1
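The new keys extend the setup to several countries: emis.ncountries gives the count and countries lists the country codes separated by semicolons, alongside nparameters growing from 44 to 50. How these strings are consumed is not part of this diff; the snippet below is only a hedged sketch of splitting such semicolon-separated rc values once they have been read into a plain dict of strings:

dasystem = {'emis.ncountries': '10',
            'countries': 'AUS; BEL; CZE; FRA; DEU; LUX; NED; POL; CHE; GBR'}

ncountries = int(dasystem['emis.ncountries'])
countries = [code.strip() for code in dasystem['countries'].split(';')]
assert len(countries) == ncountries    # 10 codes, matching emis.ncountries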
...
...
@@ -28,8 +32,11 @@ obs.background : ${datadir}/background.nc
! input data for emission model
emis.input.spatial : spatial_data.nc
-emis.input.tempobs : temporal_data.nc
-emis.input.tempprior : temporal_data.nc
+emis.input.tempobs : time_profiles_stations.nc
+emis.input.tempprior : time_profiles_stations.nc
+! Area of the gridboxes
+area.file : /home/awoude/ffdas/RINGO/Data/area.nc
! overwrite existing prior/ensemble emission files + pseudo-data (0: keep existing files; 1: create new files)
run.emisflag : 0
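The new area.file key points to a NetCDF file holding the surface area of each grid box. The diff does not show how it is read or what the variable inside is called; assuming a standard netCDF4 read and a variable named 'area', it could look like this:

import netCDF4 as nc

with nc.Dataset('/home/awoude/ffdas/RINGO/Data/area.nc') as ds:
    area = ds.variables['area'][:]    # 'area' is an assumed variable name, not shown in the diff
print(area.shape)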
...
...
@@ -37,10 +44,10 @@ run.emisflagens : 1
run.obsflag : 0

! back trajectory time of STILT footprints, also applied to OPS (in hours)
-run.backtime : 6
+run.backtime : 24

! choose propagation scheme:
! 1: no propagation, start each cycle with the same prior parameter values and covariance matrix
! 2: propagation of optimized parameter values, but not of the covariance matrix
! 3: propagation of both optimized parameter values and covariance matrix
-run.propscheme : 3
+run.propscheme : 2
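run.propscheme drops from 3 to 2: optimized parameter values are carried into the next cycle, but the covariance matrix is reset to its prior every time. The function below is only a schematic of that switch, not the actual CTDAS propagation code:

import numpy as np

def propagate(prior_mean, prior_cov, opt_mean, opt_cov, propscheme):
    # Mirror the three options documented in the rc file above
    if propscheme == 1:          # no propagation: restart from the prior every cycle
        return prior_mean, prior_cov
    if propscheme == 2:          # keep optimized values, reset the covariance
        return opt_mean, prior_cov
    return opt_mean, opt_cov     # propscheme 3: keep both

prior_mean, prior_cov = np.ones(2), np.eye(2)
opt_mean, opt_cov = np.array([0.9, 1.2]), 0.5 * np.eye(2)
mean, cov = propagate(prior_mean, prior_cov, opt_mean, opt_cov, propscheme=2)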