Commit 58818fde authored by Ingrid Luijkx


Changes for South America runs: the regions files in the analysis directory are now copied from the ones specified in the system rc file, and region pickle files in the analysis directory are deleted to make sure the correct ones are used. Adds an option to use covariances for fewer than 23 TransCom regions while still using all 23 for the analysis (does not work in tcfluxes.nc). Updates observation_operator.py to use a restart file in the first cycle also with more than one resolution (i.e. including zoom regions).
parent a56a298d
@@ -12,15 +12,18 @@ import da.tools.io4 as io
 # Get masks of different region definitions
-matrix_file = os.path.join(analysisdir, 'regions.nc')
+matrix_file = os.path.join(analysisdir, 'copied_regions.nc')
 cdf_temp = io.CT_CDF(matrix_file, 'read')
 transcommask = cdf_temp.get_variable('transcom_regions')
+if transcommask.max() < 23:
+    if 'transcom_regions_original' in cdf_temp.variables:
+        transcommask = cdf_temp.get_variable('transcom_regions_original')
 olson240mask = cdf_temp.get_variable('regions')
 olsonmask = cdf_temp.get_variable('land_ecosystems')
 oifmask = cdf_temp.get_variable('ocean_regions')
 dummy = cdf_temp.close()
-matrix_file = os.path.join(analysisdir, 'olson_extended.nc')
+matrix_file = os.path.join(analysisdir, 'copied_regions_extended.nc')
 cdf_temp = io.CT_CDF(matrix_file, 'read')
 olson_ext_mask = cdf_temp.get_variable('regions')
 dummy = cdf_temp.close()
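
The added lines implement the fallback described in the commit message: a regional setup may carry fewer than 23 TransCom regions in its covariances, but the analysis still aggregates to the full 23. A minimal standalone sketch of that logic, not part of the commit, using plain netCDF4 instead of the da.tools.io4 wrapper (an assumption for illustration):

    # Sketch only: CTDAS reads via da.tools.io4; plain netCDF4 is assumed here.
    import netCDF4

    def load_transcom_mask(regions_path):
        nc = netCDF4.Dataset(regions_path, 'r')
        try:
            mask = nc.variables['transcom_regions'][:]
            # A regional setup (e.g. South America) may define fewer than 23
            # TransCom regions; fall back to the original 23-region map so the
            # analysis can still aggregate to the full TransCom set.
            if mask.max() < 23 and 'transcom_regions_original' in nc.variables:
                mask = nc.variables['transcom_regions_original'][:]
            return mask
        finally:
            nc.close()
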
@@ -56,7 +59,7 @@ for line in temp:
     olsonshort.append(abbr)
 olsonextnams = []
-matrix_file = os.path.join(analysisdir, 'olson_extended.nc')
+matrix_file = os.path.join(analysisdir, 'copied_regions_extended.nc')
 cdf_temp = io.CT_CDF(matrix_file, 'read')
 keys = cdf_temp.ncattrs()
 keys.sort()
......
 !!! Info for the CarbonTracker data assimilation system
 datadir : /Volumes/DataRaid/CO2/carbontracker/input/ctdas_2012/
 ! For ObsPack
 obspack.input.dir : ${datadir}/obspacks/${obspack.input.id}
 obspack.input.id : obspack_co2_1_PROTOTYPE_v1.0.3_2013-01-29
 ocn.covariance : ${datadir}/oceans/oif/cov_ocean.2000.01.nc
 deltaco2.prefix : oif_p3_era40.dpco2
-bio.covariance : ${datadir}/covariances/sibcasa/cov_sibcasaregion_001_143.nc
+bio.covariance : ${datadir}/covariances/sibcasa/cov_sibcasa_koppen_sam_region_001_160.nc
 regtype : olson19_oif30
-nparameters : 174
+nparameters : 191
 random.seed : 4385
 random.seed.init: ${datadir}/randomseedinit.pickle
-regionsfile : ${datadir}/regions_sibcasa.nc
-extendedregionsfile: ${datadir}/sibcasa_extended.nc
+regionsfile : ${datadir}/regions_sibcasa_koppen_sam.nc
+extendedregionsfile: ${datadir}/sibcasa_koppen_sam_extended.nc
 ! Include a naming scheme for the variables
 #include NamingScheme.wp_Mar2011.rc
 ! Info on the sites file used
 ! For ObsPack
 obs.sites.rc : ${obspack.input.dir}/summary/sites_weights_geocarbon_July2013.rc
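
For readers new to the rc format above: keys and values are separated by ':', '!' starts a comment, '#include' pulls in another rc file, and ${key} references expand to other values. A simplified stand-in parser, not CTDAS's actual da.tools.rc reader, to make the mechanics concrete:

    import re

    def read_rc(path):
        # Parse 'key : value' pairs; '!' comment lines and '#include'
        # directives are simply skipped in this simplified stand-in.
        rc = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith(('!', '#')):
                    continue
                key, _, value = line.partition(':')
                rc[key.strip()] = value.strip()
        # Expand ${key} references; a few passes settle nested references.
        pattern = re.compile(r'\$\{([^}]+)\}')
        for _ in range(10):
            changed = False
            for key, value in list(rc.items()):
                new = pattern.sub(lambda m: rc.get(m.group(1), m.group(0)), value)
                if new != value:
                    rc[key] = new
                    changed = True
            if not changed:
                break
        return rc

With this, read_rc('carbontracker.rc')['bio.covariance'] would yield the fully expanded covariance path.
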
@@ -27,6 +27,7 @@ import shutil
 import datetime
 import subprocess
 from string import join
+import glob
 sys.path.append(os.getcwd())
 sys.path.append("../../")
@@ -135,12 +136,12 @@ class TM5ObservationOperator(ObservationOperator):
         #Use a TM5 restart file in the first cycle (instead of init file). Used now for the CO project.
         if self.dacycle.has_key('da.obsoperator.restartfileinfirstcycle'):
             restartfilename = self.dacycle['da.obsoperator.restartfileinfirstcycle']
-            sourcedir = self.dacycle['dir.exec']
             targetdir = self.tm_settings[self.savedirkey]
             create_dirs(targetdir)
-            sourcefile = os.path.join(sourcedir,restartfilename)
-            shutil.copy(sourcefile, sourcefile.replace(sourcedir, targetdir))
-            logging.debug('Copied TM5 restart file to TM5 restart directory for first cycle: %s'%sourcefile)
+            for file in glob.glob(restartfilename):
+                fname = os.path.split(file)[1]
+                logging.debug('Copied TM5 restart file to TM5 restart directory for first cycle: %s'%fname)
+                shutil.copy(file,os.path.join(targetdir,fname))
         # Replace the rc filename for TM5 with the newly created one in the new run directory
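
The point of the glob: with zoom regions enabled, TM5 writes one restart file per grid, so a single fixed filename no longer suffices and the rc key can now hold a wildcard pattern. A hypothetical illustration, with invented file names, of what the loop above would match:

    import glob

    # Hypothetical rc value; the actual pattern depends on the TM5 setup.
    restartfilename = '/scratch/tm5/restart/TM5_restart_20100101_0000_*.nc'
    # glob.glob could then return both the global and the zoom-region files, e.g.
    #   .../TM5_restart_20100101_0000_glb600x400.nc   (global grid)
    #   .../TM5_restart_20100101_0000_sam100x100.nc   (South America zoom)
    for path in glob.glob(restartfilename):
        print(path)
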
......
@@ -57,6 +57,7 @@ Other functions in the module initexit that are related to the control of a DA cycle
 import logging
 import os
 import sys
+import glob
 import shutil
 import copy
 import getopt
@@ -319,6 +320,16 @@ class CycleControl(dict):
             strippedname = os.path.split(self['jobrcfilename'])[-1]
             self['jobrcfilename'] = os.path.join(self['dir.exec'], strippedname)
+        shutil.copy(os.path.join(self.dasystem['regionsfile']),os.path.join(self['dir.exec'],'da','analysis','copied_regions.nc'))
+        logging.info('Copied regions file to the analysis directory: %s'%os.path.join(self.dasystem['regionsfile']))
+        shutil.copy(os.path.join(self.dasystem['extendedregionsfile']),os.path.join(self['dir.exec'],'da','analysis','copied_regions_extended.nc'))
+        logging.info('Copied extended regions file to the analysis directory: %s'%os.path.join(self.dasystem['extendedregionsfile']))
+        for filename in glob.glob(os.path.join(self['dir.exec'],'da','analysis','*.pickle')):
+            logging.info('Deleting pickle file %s to make sure the correct regions are used'%os.path.split(filename)[1])
+            os.remove(filename)
+        for filename in glob.glob(os.path.join(self['dir.exec'],'*.pickle')):
+            logging.info('Deleting pickle file %s to make sure the correct regions are used'%os.path.split(filename)[1])
+            os.remove(filename)
         if self.has_key('random.seed.init'):
             self.read_random_seed(True)
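
Why the pickles must go: the analysis caches derived region data in pickle files and reuses a cache whenever one exists, so a pickle left over from a run with a different regions file would silently supply the wrong regions. A simplified sketch of that cache pattern, with invented names, not part of the commit:

    import os
    import pickle

    def cached_matrix(picklefile, build):
        # Reuse a previously pickled aggregation matrix when one exists;
        # otherwise build it and cache it for the next cycle. A stale pickle
        # from another regions file would be returned unchecked, which is
        # exactly what the *.pickle deletion above guards against.
        if os.path.exists(picklefile):
            with open(picklefile, 'rb') as f:
                return pickle.load(f)
        matrix = build()
        with open(picklefile, 'wb') as f:
            pickle.dump(matrix, f)
        return matrix
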
@@ -539,6 +550,7 @@ class CycleControl(dict):
         nextrestartfilename = self['da.restart.fname'].replace(jobid,nextjobid)
         nextlogfilename = logfile.replace(jobid,nextjobid)
         template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,)
+        #template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s &\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,)
         # write and submit
         self.daplatform.write_job(jobfile, template, jobid)
......
-#! /bin/env bash
-#SBATCH -p normal
-#SBATCH -t 12:00:00
+#!/bin/sh
+#$ das.py
+#$ co2
+#$ nserial 1
+#$ 06:30:00
+#$ /bin/sh
 echo ########################
 echo "All output piped to file template.log"
 source /usr/local/Modules/3.2.8/init/sh
 source /opt/intel/bin/ifortvars.sh intel64
 export HOST='capegrim'
 module load python
 module load nco
 export icycle_in_job=999
-python template.py rc=template.rc $1 >& template.log &
+python template.py rc=template.rc -v $1 >& template.log &
@@ -51,6 +51,7 @@ obsoperator = TM5ObservationOperator(dacycle['da.obsoperator.rc'])
 samples = ObsPackObservations()
 #samples = CtObservations()
 statevector = CO2GriddedStateVector()
+#statevector = CO2StateVector()
 optimizer = CO2Optimizer()
 ##########################################################################################
@@ -68,7 +69,7 @@ ensemble_smoother_pipeline(dacycle, platform, dasystem, samples, statevector, obsoperator)
 ################### All done, extra stuff can be added next, such as analysis
 ##########################################################################################
-analysis_pipeline(dacycle, platform, dasystem, samples, statevector, obsoperator )
+analysis_pipeline(dacycle, platform, dasystem, samples, statevector )
 archive_pipeline(dacycle, platform, dasystem)
......