Commit 933f10d4 authored by Auke van der Woude

Remove R code; STILT ObsOp in Python

parent f0075661
#!/bin/sh
#$ das.py
#$ co2
#$ nserial 1
#$ 06:30:00
#$ /bin/sh
echo "All output piped to file template.log"
#source /usr/local/Modules/3.2.8/init/sh
#source /opt/intel/bin/ifortvars.sh intel64
export HOST='daint'
#module load python
export icycle_in_job=999
python ctdas-stilt.py rc=ctdas-stilt.rc -v $1 >& ctdas-stilt.log &
Traceback (most recent call last):
  File "ctdas-stilt.py", line 30, in <module>
    from da.platform.cartesius import CartesiusPlatform
  File "/nfs/home5/awoude/CTDAS/da/platform/cartesius.py", line 135
    print 'output', output
                  ^
SyntaxError: Missing parentheses in call to 'print'. Did you mean print('output', output)?
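
This traceback is the direct motivation for the port: da/platform/cartesius.py still contained Python 2 print statements. The hunks further down apply the standard Python 2-to-3 conversions; as a reference, the snippet below collects the patterns touched in this commit (illustrative only, with made-up variable names, not part of the committed code):

    # Python 2 -> Python 3 patterns applied throughout this commit (illustrative)
    vardict = {'co2': 402.5}
    print('output', 42)                       # print statement -> print() function
    response = input('proceed? ')             # raw_input() -> input()
    for vname, vprop in vardict.items():      # dict.iteritems() -> dict.items()
        pass
    datasets = list(vardict.keys())           # keys() returns a view; wrap in list() to index or copy
    try:
        raise ValueError('Averaging (%s) does not exist' % 'daily')  # 'raise E, msg' -> 'raise E(msg)'
    except ValueError:
        pass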
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
#################################################################################################
# First order of business is always to make all other python modules accessible through the path
#################################################################################################
import sys
import os
import logging
sys.path.append(os.getcwd())
#################################################################################################
# Next, import the tools needed to initialize a data assimilation cycle
#################################################################################################
from da.tools.initexit import start_logger, validate_opts_args, parse_options, CycleControl
from da.tools.pipeline import forward_pipeline, header, footer
from da.platform.cartesius import CartesiusPlatform
from da.baseclasses.dasystem import DaSystem
from da.baseclasses.statevector import StateVector
from da.carbondioxide.obspack_globalviewplus2 import ObsPackObservations
from da.carbondioxide.optimizer import CO2Optimizer
from da.baseclasses.observationoperator import ObservationOperator
#from da.analysis.expand_fluxes import save_weekly_avg_1x1_data, save_weekly_avg_state_data, save_weekly_avg_tc_data, save_weekly_avg_ext_tc_data
#from da.analysis.expand_molefractions import write_mole_fractions
#################################################################################################
# Parse and validate the command line options, start logging
#################################################################################################
start_logger()
opts, args = parse_options()
opts, args = validate_opts_args(opts, args)
#################################################################################################
# Create the Cycle Control object for this job
#################################################################################################
dacycle = CycleControl(opts, args)
platform = CartesiusPlatform()
dasystem = DaSystem(dacycle['da.system.rc'])
obsoperator = ObservationOperator(dacycle['da.obsoperator.rc'])
samples = ObsPackObservations()
statevector = StateVector()
optimizer = CO2Optimizer()
##########################################################################################
################### ENTER THE PIPELINE WITH THE OBJECTS PASSED BY THE USER ###############
##########################################################################################
logging.info(header + "Entering Pipeline " + footer)
#ensemble_smoother_pipeline(dacycle, platform, dasystem, samples, statevector, obsoperator, optimizer)
forward_pipeline(dacycle, platform, dasystem, samples, statevector, obsoperator, optimizer)
##########################################################################################
################### All done, extra stuff can be added next, such as analysis
##########################################################################################
#logging.info(header + "Starting analysis" + footer)
#sys.exit(0)
#
#save_weekly_avg_1x1_data(dacycle, statevector)
#save_weekly_avg_state_data(dacycle, statevector)
#save_weekly_avg_tc_data(dacycle, statevector)
#save_weekly_avg_ext_tc_data(dacycle)
#write_mole_fractions(dacycle)
#
#sys.exit(0)
! CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
! Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
! updates of the code. See also: http://www.carbontracker.eu.
!
! This program is free software: you can redistribute it and/or modify it under the
! terms of the GNU General Public License as published by the Free Software Foundation,
! version 3. This program is distributed in the hope that it will be useful, but
! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
! FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with this
! program. If not, see <http://www.gnu.org/licenses/>.
! author: Wouter Peters
!
! This is a blueprint for an rc-file used in CTDAS. Feel free to modify it, and please go to the main webpage for further documentation.
!
! Note that rc-files have the convention that commented lines start with an exclamation mark (!), while special lines start with a hashtag (#).
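! For example, 'time.cycle : 7' further down is an ordinary key : value entry, while this line is a comment.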
!
! When running the script start_ctdas.sh, this /.rc file will be copied to your run directory, and some items will be replaced for you.
! The result will be a nearly ready-to-go rc-file for your assimilation job. The entries and their meaning are explained by the comments below.
!
!
! HISTORY:
!
! Created on August 20th, 2013 by Wouter Peters
!
!
! The time for which to start and end the data assimilation experiment in format YYYY-MM-DD HH:MM:SS
time.start : 2016-01-10 00:00:00
time.finish : 2016-01-15 00:00:00
! Whether to restart the CTDAS system from a previous cycle, or to start the sequence fresh. Valid entries are T/F/True/False/TRUE/FALSE
time.restart : False
! The length of a cycle is given in days, such that the integer 7 denotes the typically used weekly cycle. Valid entries are integers > 1
time.cycle : 7
! The number of cycles of lag to use for a smoother version of CTDAS. CarbonTracker CO2 typically uses 5 weeks of lag. Valid entries are integers > 0
time.nlag : 3
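! With the values above (time.cycle : 7 days, time.nlag : 3), each statevector element thus stays in the
! optimization window for 3 x 7 = 21 days.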
! The directory under which the code, input, and output will be stored. This is the base directory for a run. The word
! '/' will be replaced through the start_ctdas.sh script by a user-specified folder name. DO NOT REPLACE
dir.da_run : /projects/0/ctdas/awoude
! The resources used to complete the data assimilation experiment. This depends on your computing platform.
! The number of cycles per job denotes how many cycles should be completed before starting a new process or job, this
! allows you to complete many cycles before resubmitting a job to the queue and having to wait again for resources.
! Valid entries are integers > 0
da.resources.ncycles_per_job : 1
! The ntasks specifies the number of threads to use for the MPI part of the code, if relevant. Note that the CTDAS code
! itself is not parallelized and the python code underlying CTDAS does not use multiple processors. The chosen observation
! operator though might use many processors, like TM5. Valid entries are integers > 0
da.resources.ntasks : 1
! This specifies the amount of wall-clock time to request for each job. Its value depends on your computing platform and might take
! any form appropriate for your system. Typically, HPC queueing systems allow you a certain number of hours of usage before
! your job is killed, and you are expected to finalize and submit a next job before that time. Valid entries are strings.
da.resources.ntime : 04:00:00
! The resource settings above will cause the creation of a job file in which 1 cycle will be run, and 1 task
! is requested for a duration of 4 hours
!
! Info on the DA system used, this depends on your application of CTDAS and might refer to for instance CO2, or CH4 optimizations.
!
da.system : CarbonTracker
! The specific settings for your system are read from a separate rc-file, which points to the data directories, observations, etc
da.system.rc : da/rc/carbontracker_random.rc
! This flag should probably be moved to the da.system.rc file. It denotes which type of filtering to use in the optimizer
da.system.localization : None
! Info on the observation operator to be used, these keys help to identify the settings for the transport model in this case
da.obsoperator : STILT
!
! The STILT transport model is controlled by an rc-file as well. The value below refers to the configuration of the STILT model to
! be used as observation operator in this experiment.
!
da.obsoperator.home : da/stilt
da.obsoperator.rc : ${da.obsoperator.home}/stilt.rc
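!
! Note the ${da.obsoperator.home} reference above: an rc value may point to another key, which the rc reader
! expands when the file is loaded. A minimal sketch of that kind of substitution, assuming a recursive
! single-pass lookup (the 'expand' helper is hypothetical; the real parser ships with CTDAS in da/tools):
!
!     import re
!     def expand(key, rcdict):
!         value = rcdict[key]
!         for ref in re.findall(r'\$\{([^}]+)\}', value):
!             value = value.replace('${%s}' % ref, expand(ref, rcdict))
!         return value
!     rcdict = {'da.obsoperator.home': 'da/stilt',
!               'da.obsoperator.rc': '${da.obsoperator.home}/stilt.rc'}
!     print(expand('da.obsoperator.rc', rcdict))   # -> da/stilt/stilt.rc
!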
!
! The number of ensemble members used in the experiment. Valid entries are integers > 2
!
da.optimizer.nmembers : 10
! Finally, info on the archive task, if any. Archive tasks are run after each cycle to ensure that the results of each cycle are
! preserved, even if you run on scratch space or a temporary disk. Since an experiment can take multiple weeks to complete, moving
! your results out of the way, or backing them up, is usually a good idea. Note that the tasks are commented and need to be uncommented
! to use this feature.
! The following key identifies that two archive tasks will be executed, one called 'alldata' and the other 'resultsonly'.
!task.rsync : alldata onlyresults
! The specifics for the first task.
! 1> Which source directories to back up. Valid entry is a list of folders separated by spaces
! 2> Which destination directory to use. Valid entries are a folder name, or server and folder name in rsync format as below
! 3> Which flags to add to the rsync command
! The settings below will result in an rsync command that looks like:
!
! rsync -auv -e ssh ${dir.da_run} you@yourserver.com:/yourfolder/
!
!task.rsync.alldata.sourcedirs : ${dir.da_run}
!task.rsync.alldata.destinationdir : you@yourserver.com:/yourfolder/
!task.rsync.alldata.flags : -auv -e ssh
! Repeated for rsync task 2, note that we only back up the analysis and output dirs here
!task.rsync.onlyresults.sourcedirs : ${dir.da_run}/analysis ${dir.da_run}/output
!task.rsync.onlyresults.destinationdir : you@yourserver.com:/yourfolder/
!task.rsync.onlyresults.flags : -auv -e ssh
@@ -45,7 +45,7 @@ File created on 21 October 2008.
 def proceed_dialog(txt, yes=['y', 'yes'], all=['a', 'all', 'yes-to-all']):
     """ function to ask whether to proceed or not """
-    response = raw_input(txt)
+    response = input(txt)
     if response.lower() in yes:
         return 1
     if response.lower() in all:
@@ -113,7 +113,7 @@ def save_weekly_avg_1x1_data(dacycle, statevector):
         fire = np.array(file.get_variable(dacycle.dasystem['background.co2.fires.flux']))
         fossil = np.array(file.get_variable(dacycle.dasystem['background.co2.fossil.flux']))
         #mapped_parameters = np.array(file.get_variable(dacycle.dasystem['final.param.mean.1x1']))
-        if dacycle.dasystem['background.co2.biosam.flux'] in file.variables.keys():
+        if dacycle.dasystem['background.co2.biosam.flux'] in list(file.variables.keys()):
             sam = True
             biosam = np.array(file.get_variable(dacycle.dasystem['background.co2.biosam.flux']))
             firesam = np.array(file.get_variable(dacycle.dasystem['background.co2.firesam.flux']))
@@ -162,7 +162,7 @@ def save_weekly_avg_1x1_data(dacycle, statevector):
             #
             # if prior, do not multiply fluxes with parameters, otherwise do
             #
-            print gridensemble.shape, bio.shape, gridmean.shape
+            print(gridensemble.shape, bio.shape, gridmean.shape)
             biomapped = bio * gridmean
             oceanmapped = ocean * gridmean
             biovarmapped = bio * gridensemble
@@ -184,7 +184,7 @@ def save_weekly_avg_1x1_data(dacycle, statevector):
             savedict['count'] = next
             ncf.add_data(savedict)
-            print biovarmapped.shape
+            print(biovarmapped.shape)
             savedict = ncf.standard_var(varname='bio_flux_%s_ensemble' % qual_short)
             savedict['values'] = biovarmapped.tolist()
             savedict['dims'] = dimdate + dimensemble + dimgrid
@@ -301,7 +301,7 @@ def save_weekly_avg_state_data(dacycle, statevector):
         fire = np.array(file.get_variable(dacycle.dasystem['background.co2.fires.flux']))
         fossil = np.array(file.get_variable(dacycle.dasystem['background.co2.fossil.flux']))
         #mapped_parameters = np.array(file.get_variable(dacycle.dasystem['final.param.mean.1x1']))
-        if dacycle.dasystem['background.co2.biosam.flux'] in file.variables.keys():
+        if dacycle.dasystem['background.co2.biosam.flux'] in list(file.variables.keys()):
             sam = True
             biosam = np.array(file.get_variable(dacycle.dasystem['background.co2.biosam.flux']))
             firesam = np.array(file.get_variable(dacycle.dasystem['background.co2.firesam.flux']))
@@ -555,7 +555,7 @@ def save_weekly_avg_tc_data(dacycle, statevector):
         # Now convert other variables that were inside the flux_1x1 file
         vardict = ncf_in.variables
-        for vname, vprop in vardict.iteritems():
+        for vname, vprop in vardict.items():
             data = ncf_in.get_variable(vname)[index]
@@ -680,7 +680,7 @@ def save_weekly_avg_ext_tc_data(dacycle):
         # Now convert other variables that were inside the tcfluxes.nc file
         vardict = ncf_in.variables
-        for vname, vprop in vardict.iteritems():
+        for vname, vprop in vardict.items():
             data = ncf_in.get_variable(vname)[index]
@@ -899,7 +899,7 @@ def save_weekly_avg_agg_data(dacycle, region_aggregate='olson'):
         # Now convert other variables that were inside the statevector file
         vardict = ncf_in.variables
-        for vname, vprop in vardict.iteritems():
+        for vname, vprop in vardict.items():
             if vname == 'latitude': continue
             elif vname == 'longitude': continue
             elif vname == 'date': continue
@@ -1014,7 +1014,7 @@ def save_time_avg_data(dacycle, infile, avg='monthly'):
         pass
     file = io.ct_read(infile, 'read')
-    datasets = file.variables.keys()
+    datasets = list(file.variables.keys())
     date = file.get_variable('date')
     globatts = file.ncattrs()
@@ -1042,7 +1042,7 @@ def save_time_avg_data(dacycle, infile, avg='monthly'):
        for d in vardims:
            if 'date' in d:
                continue
-           if d in ncf.dimensions.keys():
+           if d in list(ncf.dimensions.keys()):
                pass
            else:
                dim = ncf.createDimension(d, size=len(file.dimensions[d]))
@@ -1072,7 +1072,7 @@ def save_time_avg_data(dacycle, infile, avg='monthly'):
         time_avg = [time_avg]
         data_avg = [data_avg]
     else:
-        raise ValueError, 'Averaging (%s) does not exist' % avg
+        raise ValueError('Averaging (%s) does not exist' % avg)
     count = -1
     for dd, data in zip(time_avg, data_avg):
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# expand_fluxes.py
import sys
import os
sys.path.append('../../')
rootdir = os.getcwd().split('da/')[0]
analysisdir = os.path.join(rootdir, 'da/analysis')
from datetime import datetime, timedelta
import logging
import numpy as np
from da.tools.general import date2num, num2date
import da.tools.io4 as io
from da.analysis.tools_regions import globarea, state_to_grid
from da.tools.general import create_dirs
from da.analysis.tools_country import countryinfo # needed here
from da.analysis.tools_transcom import transcommask, ExtendedTCRegions
import netCDF4 as cdf
import da.analysis.tools_transcom as tc
import da.analysis.tools_country as ct
import da.analysis.tools_time as timetools
"""
Author: Wouter Peters (Wouter.Peters@noaa.gov)
Revision History:
File created on 21 October 2008.
"""
def proceed_dialog(txt, yes=['y', 'yes'], all=['a', 'all', 'yes-to-all']):
    """ function to ask whether to proceed or not """
    response = input(txt)
    if response.lower() in yes:
        return 1
    if response.lower() in all:
        return 2
    return 0
def save_weekly_avg_1x1_data(dacycle, statevector):
    """
    Function creates a NetCDF file with output on 1x1 degree grid. It uses the flux data written by the
    :class:`~da.baseclasses.obsoperator.ObsOperator.py`, and multiplies these with the mapped parameters and
    variance (not covariance!) from the :class:`~da.baseclasses.statevector.StateVector`.

    :param dacycle: a :class:`~da.tools.initexit.CycleControl` object
    :param statevector: a :class:`~da.baseclasses.statevector.StateVector`
    :rtype: None
    """
    #
    dirname = create_dirs(os.path.join(dacycle['dir.analysis'], 'data_flux1x1_weekly'))
    #
    # Some help variables
    #
    dectime0 = date2num(datetime(2000, 1, 1))
    dt = dacycle['cyclelength']
    startdate = dacycle['time.start']
    enddate = dacycle['time.end']
    nlag = statevector.nlag

    logging.debug("DA Cycle start date is %s" % startdate.strftime('%Y-%m-%d %H:%M'))
    logging.debug("DA Cycle end date is %s" % enddate.strftime('%Y-%m-%d %H:%M'))
    #
    # Create or open NetCDF output file
    #
    saveas = os.path.join(dirname, 'flux_1x1.%s.nc' % startdate.strftime('%Y-%m-%d'))
    ncf = io.CT_CDF(saveas, 'write')
    #
    # Create dimensions and lat/lon grid
    #
    dimgrid = ncf.add_latlon_dim()
    dimensemble = ncf.add_dim('members', statevector.nmembers)
    dimdate = ncf.add_date_dim()
    #
    # set title and tell GMT that we are using "pixel registration"
    #
    setattr(ncf, 'Title', 'CarbonTracker fluxes')
    setattr(ncf, 'node_offset', 1)
    #
    # skip dataset if already in file
    #
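    # decimal date stamp for this cycle: days elapsed since 2000-01-01 (dectime0), shifted forward by
    # half a cycle length so the stamp marks the cycle midpoint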
    ncfdate = date2num(startdate) - dectime0 + dt.days / 2.0
    skip = ncf.has_date(ncfdate)
    if skip:
        logging.warning('Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'), saveas))
    else:
        #
        # if not, process this cycle. Start by getting flux input data from CTDAS
        #
        filename = os.path.join(dacycle['dir.output'], 'flux1x1_%s_%s.nc' % (startdate.strftime('%Y%m%d%H'), enddate.strftime('%Y%m%d%H')))
        file = io.ct_read(filename, 'read')
        bio = np.array(file.get_variable(dacycle.dasystem['background.co2.bio.flux']))
        ocean = np.array(file.get_variable(dacycle.dasystem['background.co2.ocean.flux']))
        fire = np.array(file.get_variable(dacycle.dasystem['background.co2.fires.flux']))
        fossil = np.array(file.get_variable(dacycle.dasystem['background.co2.fossil.flux']))
        #mapped_parameters = np.array(file.get_variable(dacycle.dasystem['final.param.mean.1x1']))
        if dacycle.dasystem['background.co2.biosam.flux'] in list(file.variables.keys()):
            sam = True
            biosam = np.array(file.get_variable(dacycle.dasystem['background.co2.biosam.flux']))
            firesam = np.array(file.get_variable(dacycle.dasystem['background.co2.firesam.flux']))
        else:
            sam = False
        file.close()

        if sam:
            bio = bio + biosam
            fire = fire + firesam

        next = ncf.inq_unlimlen()[0]

        # Start adding datasets from here on, both prior and posterior datasets for bio and ocn
        for prior in [True, False]:
            #
            # Now fill the statevector with the prior values for this time step. Note that the prior value for this time step
            # occurred nlag time steps ago, so we make a shift in the output directory, but only if we are more than nlag cycle away from the start date..
            #
            if prior:
                qual_short = 'prior'
                for n in range(nlag, 0, -1):
                    priordate = startdate + n*dt - timedelta(dt.days * n)
                    savedir = dacycle['dir.output'].replace(startdate.strftime('%Y%m%d'), priordate.strftime('%Y%m%d'))
                    filename = os.path.join(savedir, 'savestate_%s.nc' % priordate.strftime('%Y%m%d'))
                    if os.path.exists(filename):
                        statevector.read_from_file(filename, qual=qual_short)
                        gridmean, gridensemble = statevector.state_to_grid(lag=n)

                        # Replace the mean statevector by all ones (assumed priors)
                        gridmean = statevector.vector2grid(vectordata=np.ones(statevector.nparams,))

                        logging.debug('Read prior dataset from file %s, sds %d: ' % (filename, n))
                        break
            else:
                qual_short = 'opt'
                savedir = dacycle['dir.output']
                filename = os.path.join(savedir, 'savestate_%s.nc' % startdate.strftime('%Y%m%d'))
                statevector.read_from_file(filename, qual=qual_short)
                gridmean, gridensemble = statevector.state_to_grid(lag=1)
                logging.debug('Read posterior dataset from file %s, sds %d: ' % (filename, 1))
            #
            # if prior, do not multiply fluxes with parameters, otherwise do
            #
            print(gridensemble.shape, bio.shape, gridmean.shape)
            biomapped = bio * gridmean
            oceanmapped = ocean * gridmean
            biovarmapped = bio * gridensemble
            oceanvarmapped = ocean * gridensemble
            #
            #
            # For each dataset, get the standard definitions from the module mysettings, add values, dimensions, and unlimited count, then write
            #
            savedict = ncf.standard_var(varname='bio_flux_' + qual_short)
            savedict['values'] = biomapped.tolist()
            savedict['dims'] = dimdate + dimgrid
            savedict['count'] = next
            ncf.add_data(savedict)
            #
            savedict = ncf.standard_var(varname='ocn_flux_' + qual_short)
            savedict['values'] = oceanmapped.tolist()
            savedict['dims'] = dimdate + dimgrid
            savedict['count'] = next
            ncf.add_data(savedict)

            print(biovarmapped.shape)
            savedict = ncf.standard_var(varname='bio_flux_%s_ensemble' % qual_short)
            savedict['values'] = biovarmapped.tolist()
            savedict['dims'] = dimdate + dimensemble + dimgrid
            savedict['count'] = next
            ncf.add_data(savedict)
            #
            savedict = ncf.standard_var(varname='ocn_flux_%s_ensemble' % qual_short)
            savedict['values'] = oceanvarmapped.tolist()
            savedict['dims'] = dimdate + dimensemble + dimgrid
            savedict['count'] = next
            ncf.add_data(savedict)

        # End prior/posterior block

        savedict = ncf.standard_var(varname='fire_flux_imp')
        savedict['values'] = fire.tolist()
        savedict['dims'] = dimdate + dimgrid
        savedict['count'] = next
        ncf.add_data(savedict)
        #
        savedict = ncf.standard_var(varname='fossil_flux_imp')
        savedict['values'] = fossil.tolist()
        savedict['dims'] = dimdate + dimgrid
        savedict['count'] = next
        ncf.add_data(savedict)

        area = globarea()
        savedict = ncf.standard_var(varname='cell_area')
        savedict['values'] = area.tolist()
        savedict['dims'] = dimgrid
        ncf.add_data(savedict)
        #
        savedict = ncf.standard_var(varname='date')
        savedict['values'] = date2num(startdate) - dectime0 + dt.days / 2.0
        savedict['dims'] = dimdate
        savedict['count'] = next
        ncf.add_data(savedict)

        sys.stdout.write('.')
        sys.stdout.flush()
    #
    # Done, close the new NetCDF file
    #
    ncf.close()
    #
    # Return the full name of the NetCDF file so it can be processed by the next routine
    #
    logging.info("Gridded weekly average fluxes now written")

    return saveas
def save_weekly_avg_state_data(dacycle, statevector):
    """
    Function creates a NetCDF file with output for all parameters. It uses the flux data written by the
    :class:`~da.baseclasses.obsoperator.ObsOperator.py`, and multiplies these with the mapped parameters and
    variance (not covariance!) from the :class:`~da.baseclasses.statevector.StateVector`.

    :param dacycle: a :class:`~da.tools.initexit.CycleControl` object
    :param statevector: a :class:`~da.baseclasses.statevector.StateVector`
    :rtype: None
    """

    dirname = create_dirs(os.path.join(dacycle['dir.analysis'], 'data_state_weekly'))
    #
    # Some help variables
    #
    dectime0 = date2num(datetime(2000, 1, 1))
    dt = dacycle['cyclelength']
    startdate = dacycle['time.start']
    enddate = dacycle['time.end']
    nlag = statevector.nlag
    area = globarea()
    vectorarea = statevector.grid2vector(griddata=area, method='sum')

    logging.debug("DA Cycle start date is %s" % startdate.strftime('%Y-%m-%d %H:%M'))
    logging.debug("DA Cycle end date is %s" % enddate.strftime('%Y-%m-%d %H:%M'))
    #
    # Create or open NetCDF output file
    #
    saveas = os.path.join(dirname, 'statefluxes.nc')
    ncf = io.CT_CDF(saveas, 'write')
    #
    # Create dimensions and lat/lon grid
    #
    dimregs = ncf.add_dim('nparameters', statevector.nparams)
    dimmembers = ncf.add_dim('nmembers', statevector.nmembers)
    dimdate = ncf.add_date_dim()
    #
    # set title and tell GMT that we are using "pixel registration"
    #
    setattr(ncf, 'Title', 'CarbonTracker fluxes')
    setattr(ncf, 'node_offset', 1)
    #
    # skip dataset if already in file
    #
    ncfdate = date2num(startdate) - dectime0 + dt.days / 2.0
    skip = ncf.has_date(ncfdate)
    if skip:
        logging.warning('Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'), saveas))
    else:
        next = ncf.inq_unlimlen()[0]
        #
        # if not, process this cycle. Start by getting flux input data from CTDAS
        #
        filename = os.path.join(dacycle['dir.output'], 'flux1x1_%s_%s.nc' % (startdate.strftime('%Y%m%d%H'), enddate.strftime('%Y%m%d%H')))
        file = io.ct_read(filename, 'read')
        bio = np.array(file.get_variable(dacycle.dasystem['background.co2.bio.flux']))
        ocean = np.array(file.get_variable(dacycle.dasystem['background.co2.ocean.flux']))