diff --git a/da/__init__.py b/da/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/da/analysis/__init__.py b/da/analysis/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/da/analysis/expand_fluxes.py b/da/analysis/expand_fluxes.py
deleted file mode 100755
index b00c0981de83f294df623ee1baabe5c5370c55c3..0000000000000000000000000000000000000000
--- a/da/analysis/expand_fluxes.py
+++ /dev/null
@@ -1,860 +0,0 @@
-#!/usr/bin/env python
-# expand_fluxes.py
-import sys
-sys.path.append('../../')
-import os
-import getopt
-from datetime import datetime, timedelta
-from da.tools.general import CreateDirs
-import numpy as np
-from pylab import date2num, num2date
-
-"""
-Author: Wouter Peters (Wouter.Peters@noaa.gov)
-
-Revision History:
-File created on 21 October 2008.
-
-"""
-
-def proceed_dialog(txt, yes=['y','yes'], all=['a','all', 'yes-to-all']):
-    """ function to ask whether to proceed or not """
-    response=raw_input(txt)
-    if response.lower() in yes:
-       return 1
-    if response.lower() in all:
-       return 2
-    return 0
-
-def SaveWeeklyAvg1x1Data(DaCycle, StateVector):
-    """
-        Function creates a NetCDF file with output on a 1x1 degree grid. It uses the flux data written by the 
-        :class:`~da.baseclasses.obsoperator.ObsOperator.py`, and multiplies these with the mapped parameters and
-        variance (not covariance!) from the :class:`~da.baseclasses.statevector.StateVector`.
-        
-           :param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
-           :param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
-           :rtype: None
-    """
-
-    import da.tools.io4 as io
-    import logging
-#
-    dirname     = 'data_flux1x1_weekly'
-#
-    dirname     = CreateDirs(os.path.join(DaCycle['dir.analysis'],dirname))
-#
-# Some help variables
-#
-    dectime0    = date2num(datetime(2000,1,1))
-    dt          = DaCycle['cyclelength']
-    startdate   = DaCycle['time.start'] 
-    enddate     = DaCycle['time.end'] 
-    nlag        = StateVector.nlag
-
-    msg = "DA Cycle start date is %s"   % startdate.strftime('%Y-%m-%d %H:%M')      ; logging.debug(msg)
-    msg = "DA Cycle end   date is %s"   % enddate.strftime('%Y-%m-%d %H:%M')        ; logging.debug(msg)
-
-#
-# Create or open NetCDF output file
-#
-    saveas      = os.path.join(dirname,'flux_1x1.nc')
-    ncf         = io.CT_CDF(saveas,'write')
-
-#
-# Create dimensions and lat/lon grid
-#
-    dimgrid     = ncf.AddLatLonDim()
-    dimdate     = ncf.AddDateDim()
-#
-# set title and tell GMT that we are using "pixel registration"
-#
-    setattr(ncf,'Title','CarbonTracker fluxes')
-    setattr(ncf,'node_offset',1)
-#
-# skip dataset if already in file
-#
-    ncfdate = date2num(startdate)-dectime0+dt.days/2.0
-    skip    = ncf.has_date(ncfdate)
-    if skip:
-        msg = 'Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'),saveas) ; logging.warning(msg)
-    else:
-        
-#
-# if not, process this cycle. Start by getting flux input data from CTDAS
-#
-        filename = os.path.join(DaCycle['dir.output'],'flux1x1_%s_%s.nc'%(startdate.strftime('%Y%m%d%H'),enddate.strftime('%Y%m%d%H'),) )
-
-        file                = io.CT_Read(filename,'read')
-        bio                 = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.bio.flux']))
-        ocean               = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.ocean.flux']))
-        fire                = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.fires.flux']))
-        fossil              = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.fossil.flux']))
-        #mapped_parameters   = np.array(file.GetVariable(DaCycle.DaSystem['final.param.mean.1x1']))
-        file.close()
-
-        next=ncf.inq_unlimlen()[0]
-
-
-# Start adding datasets from here on, both prior and posterior datasets for bio and ocn
-
-        for prior in [True, False]:
-#
-# Now fill the StateVector with the prior values for this time step. Note that the prior value for this time step
-# occurred nlag time steps ago, so we make a shift in the output directory, but only if we are more than nlag cycles away from the start date.
-#
-
-            if prior:
-                qual_short='prior'
-                for n in range(nlag,0,-1):
-                    priordate       = enddate - timedelta(dt.days*n)
-                    savedir         = DaCycle['dir.output'].replace(startdate.strftime('%Y%m%d'),priordate.strftime('%Y%m%d') )
-                    filename        = os.path.join(savedir,'savestate.nc')
-                    if os.path.exists(filename):
-                        dummy                   = StateVector.ReadFromFile(filename)
-                        gridmean,gridvariance   = StateVector.StateToGrid(lag=n)
-
-                        msg = 'Read prior dataset from file %s, sds %d: '%(filename,n) ; logging.debug(msg)
-                        break
-            else:
-                qual_short='opt'
-                savedir                 = DaCycle['dir.output']
-                filename                = os.path.join(savedir,'savestate.nc')
-                dummy                   = StateVector.ReadFromFile(filename)
-                gridmean,gridvariance   = StateVector.StateToGrid(lag=nlag)
-
-                msg = 'Read posterior dataset from file %s, sds %d: '%(filename,nlag) ; logging.debug(msg)
-#
-# if prior, do not multiply fluxes with parameters, otherwise do
-#
-            biomapped=bio*gridmean 
-            oceanmapped=ocean*gridmean 
-            biovarmapped=bio*gridvariance
-            oceanvarmapped=ocean*gridvariance
-
-#
-#
-#  For each dataset, get the standard definitions from the module mysettings, add values, dimensions, and unlimited count, then write
-#
-            savedict=ncf.StandardVar(varname='bio_flux_'+qual_short)
-            savedict['values']=biomapped.tolist()
-            savedict['dims']=dimdate+dimgrid
-            savedict['count']=next
-            ncf.AddData(savedict)
-#
-            savedict=ncf.StandardVar(varname='ocn_flux_'+qual_short)
-            savedict['values']=oceanmapped.tolist()
-            savedict['dims']=dimdate+dimgrid
-            savedict['count']=next
-            ncf.AddData(savedict)
-
-            savedict=ncf.StandardVar(varname='bio_flux_%s_cov'%qual_short)
-            savedict['values']=biovarmapped.tolist()
-            savedict['dims']=dimdate+dimgrid
-            savedict['count']=next
-            ncf.AddData(savedict)
-#
-            savedict=ncf.StandardVar(varname='ocn_flux_%s_cov'%qual_short)
-            savedict['values']=oceanvarmapped.tolist()
-            savedict['dims']=dimdate+dimgrid
-            savedict['count']=next
-            ncf.AddData(savedict)
-
-        # End prior/posterior block
-
-        savedict=ncf.StandardVar(varname='fire_flux_imp')
-        savedict['values']=fire.tolist()
-        savedict['dims']=dimdate+dimgrid
-        savedict['count']=next
-        ncf.AddData(savedict)
-#
-        savedict=ncf.StandardVar(varname='fossil_flux_imp')
-        savedict['values']=fossil.tolist()
-        savedict['dims']=dimdate+dimgrid
-        savedict['count']=next
-        ncf.AddData(savedict)
-#
-        savedict=ncf.StandardVar(varname='date')
-        savedict['values']=date2num(startdate)-dectime0+dt.days/2.0
-        savedict['dims']=dimdate
-        savedict['count']=next
-        ncf.AddData(savedict)
-
-        sys.stdout.write('.')
-        sys.stdout.flush()
-#
-#   Done, close the new NetCDF file
-#
-    ncf.close()
-#
-#   Return the full name of the NetCDF file so it can be processed by the next routine
-#
-    msg="Gridded weekly average fluxes now written" ; logging.info(msg)
-
-    return saveas
-
-def SaveWeeklyAvgStateData(DaCycle, StateVector):
-    """
-        Function creates a NetCDF file with output for all parameters. It uses the flux data written by the 
-        :class:`~da.baseclasses.obsoperator.ObsOperator.py`, and multiplies these with the mapped parameters and
-        variance (not covariance!) from the :class:`~da.baseclasses.statevector.StateVector`.
-        
-           :param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
-           :param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
-           :rtype: None
-    """
-
-    import da.tools.io4 as io
-    import logging
-    from da.analysis.tools_regions import globarea
-    
-#
-    dirname     = 'data_state_weekly'
-#
-    dirname     = CreateDirs(os.path.join(DaCycle['dir.analysis'],dirname))
-#
-# Some help variables
-#
-    dectime0    = date2num(datetime(2000,1,1))
-    dt          = DaCycle['cyclelength']
-    startdate   = DaCycle['time.start'] 
-    enddate     = DaCycle['time.end'] 
-    nlag        = StateVector.nlag
-
-    area        = globarea()
-    vectorarea  = StateVector.GridToVector(griddata=area,method='sum')
-
-    msg = "DA Cycle start date is %s"   % startdate.strftime('%Y-%m-%d %H:%M')      ; logging.debug(msg)
-    msg = "DA Cycle end   date is %s"   % enddate.strftime('%Y-%m-%d %H:%M')        ; logging.debug(msg)
-
-#
-# Create or open NetCDF output file
-#
-    saveas      = os.path.join(dirname,'statefluxes.nc')
-    ncf         = io.CT_CDF(saveas,'write')
-
-#
-# Create dimensions and lat/lon grid
-#
-    dimregs     = ncf.AddDim('nparameters',StateVector.nparams)
-    dimmembers  = ncf.AddDim('nmembers',StateVector.nmembers)
-    dimdate     = ncf.AddDateDim()
-#
-# set title and tell GMT that we are using "pixel registration"
-#
-    setattr(ncf,'Title','CarbonTracker fluxes')
-    setattr(ncf,'node_offset',1)
-#
-# skip dataset if already in file
-#
-    ncfdate = date2num(startdate)-dectime0+dt.days/2.0
-    skip    = ncf.has_date(ncfdate)
-    if skip:
-        msg = 'Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'),saveas) ; logging.warning(msg)
-    else:
-
-        next=ncf.inq_unlimlen()[0]
-
-#
-# if not, process this cycle. Start by getting flux input data from CTDAS
-#
-        filename = os.path.join(DaCycle['dir.output'],'flux1x1_%s_%s.nc'%(startdate.strftime('%Y%m%d%H'),enddate.strftime('%Y%m%d%H'),) )
-
-        file                = io.CT_Read(filename,'read')
-        bio                 = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.bio.flux']))
-        ocean               = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.ocean.flux']))
-        fire                = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.fires.flux']))
-        fossil              = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.fossil.flux']))
-        #mapped_parameters   = np.array(file.GetVariable(DaCycle.DaSystem['final.param.mean.1x1']))
-        file.close()
-
-        next=ncf.inq_unlimlen()[0]
-
-        vectorbio           = StateVector.GridToVector(griddata=bio*area,method='sum')
-        vectorocn           = StateVector.GridToVector(griddata=ocean*area,method='sum')
-        vectorfire          = StateVector.GridToVector(griddata=fire*area,method='sum')
-        vectorfossil        = StateVector.GridToVector(griddata=fossil*area,method='sum')
-
-
-# Start adding datasets from here on, both prior and posterior datasets for bio and ocn
-
-        for prior in [True, False]:
-#
-# Now fill the StateVector with the prior values for this time step. Note that the prior value for this time step
-# occurred nlag time steps ago, so we make a shift in the output directory, but only if we are more than nlag cycles away from the start date.
-#
-
-            if prior:
-                qual_short='prior'
-                for n in range(nlag,0,-1):
-                    priordate       = enddate - timedelta(dt.days*n)
-                    savedir         = DaCycle['dir.output'].replace(startdate.strftime('%Y%m%d'),priordate.strftime('%Y%m%d') )
-                    filename        = os.path.join(savedir,'savestate.nc')
-                    if os.path.exists(filename):
-                        dummy                   = StateVector.ReadFromFile(filename)
-                        choicelag               = n
-
-                        msg = 'Read prior dataset from file %s, sds %d: '%(filename,n) ; logging.debug(msg)
-                        break
-            else:
-                qual_short='opt'
-                savedir                 = DaCycle['dir.output']
-                filename                = os.path.join(savedir,'savestate.nc')
-                dummy                   = StateVector.ReadFromFile(filename)
-                choicelag               = nlag
-
-                msg = 'Read posterior dataset from file %s, sds %d: '%(filename,nlag) ; logging.debug(msg)
-#
-# if prior, do not multiply fluxes with parameters, otherwise do
-#
-            data                = StateVector.EnsembleMembers[choicelag-1][0].ParameterValues*vectorbio # units of mole region-1 s-1
-
-            savedict            = ncf.StandardVar(varname='bio_flux_%s'%qual_short)
-            savedict['values']  = data
-            savedict['dims']    = dimdate+dimregs
-            savedict['count']   = next
-            ncf.AddData(savedict)
-
-            members             = StateVector.EnsembleMembers[choicelag-1] 
-            deviations          = np.array([mem.ParameterValues*data for mem in members])
-
-            savedict=ncf.StandardVar(varname='bio_flux_%s_cov'%qual_short)
-
-            savedict['values']=deviations.tolist()
-            savedict['dims']=dimdate+dimmembers+dimregs
-            savedict['comment']="This is the matrix square root, use (M x M^T)/(nmembers-1) to make covariance"
-            savedict['units']="mol region-1 s-1"
-            savedict['count']=next
-            ncf.AddData(savedict)
-
-            savedict=ncf.StandardVar('unknown')
-            savedict['name'] = 'bio_flux_%s_std'%qual_short
-            savedict['long_name'] = 'Biosphere flux standard deviation, %s'%qual_short
-            savedict['values']=deviations.std(axis=0)
-            savedict['dims']=dimdate+dimregs
-            savedict['comment']="This is the standard deviation on each parameter"
-            savedict['units']="mol region-1 s-1"
-            savedict['count']=next
-            ncf.AddData(savedict)
-
-            data                = StateVector.EnsembleMembers[choicelag-1][0].ParameterValues*vectorocn # units of mole region-1 s-1
-
-            savedict=ncf.StandardVar(varname='ocn_flux_%s'%qual_short)
-            savedict['values']=data
-            savedict['dims']=dimdate+dimregs
-            savedict['count']=next
-            ncf.AddData(savedict)
-
-            deviations          = np.array([mem.ParameterValues*data for mem in members])
-
-            savedict=ncf.StandardVar(varname='ocn_flux_%s_cov'%qual_short)
-            savedict['values']=deviations.tolist()
-            savedict['dims']=dimdate+dimmembers+dimregs
-            savedict['comment']="This is the matrix square root, use (M x M^T)/(nmembers-1) to make covariance"
-            savedict['units']="mol region-1 s-1"
-            savedict['count']=next
-            ncf.AddData(savedict)
-
-            savedict=ncf.StandardVar('unknown')
-            savedict['name'] = 'ocn_flux_%s_std'%qual_short
-            savedict['long_name'] = 'Ocean flux standard deviation, %s'%qual_short
-            savedict['values']=deviations.std(axis=0)
-            savedict['dims']=dimdate+dimregs
-            savedict['comment']="This is the standard deviation on each parameter"
-            savedict['units']="mol region-1 s-1"
-            savedict['count']=next
-            ncf.AddData(savedict)
-
-        data                = vectorfire
-
-        savedict=ncf.StandardVar(varname='fire_flux_imp')
-        savedict['values']=data
-        savedict['dims']=dimdate+dimregs
-        savedict['count']=next
-        ncf.AddData(savedict)
-
-        data                = vectorfossil
-
-        savedict=ncf.StandardVar(varname='fossil_flux_imp')
-        savedict['values']=data
-        savedict['dims']=dimdate+dimregs
-        savedict['count']=next
-        ncf.AddData(savedict)
-
-        savedict=ncf.StandardVar(varname='date')
-        savedict['values']  = ncfdate
-        savedict['dims']    = dimdate
-        savedict['count']   = next
-        ncf.AddData(savedict)
-
-        sys.stdout.write('.')
-        sys.stdout.flush()
-#
-#   Done, close the new NetCDF file
-#
-    ncf.close()
-#
-#   Return the full name of the NetCDF file so it can be processed by the next routine
-#
-    msg="Vector weekly average fluxes now written" ; logging.info(msg)
-
-    return saveas
-
-
-def SaveWeeklyAvgTCData(DaCycle, StateVector):
-    """
-        Function creates a NetCDF file with output on TransCom regions. It uses the flux input created by the
-        function `SaveWeeklyAvgStateData` to create fluxes of length `nparameters`, which are then projected
-        onto TC regions using the internal methods of :class:`~da.baseclasses.statevector.StateVector`.
-        
-           :param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
-           :param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
-           :rtype: None
-
-        This function only reads the prior fluxes from the statefluxes.nc file created before, because we want to convolve
-        these with the parameters in the StateVector. This creates posterior fluxes, and the posterior covariance for the complete
-        StateVector in units of mol/box/s, which we then turn into TC fluxes and covariances.
-    """
-    from da.analysis.tools_regions import globarea, StateToGrid
-    from da.analysis.tools_transcom import StateToTranscom, StateCovToTranscom, transcommask
-    import da.tools.io4 as io
-    import logging
-#
-    dirname     = 'data_tc_weekly'
-#
-    dirname     = CreateDirs(os.path.join(DaCycle['dir.analysis'],dirname))
-#
-# Some help variables
-#
-    dectime0    = date2num(datetime(2000,1,1))
-    dt          = DaCycle['cyclelength']
-    startdate   = DaCycle['time.start'] 
-    enddate     = DaCycle['time.end'] 
-    ncfdate     = date2num(startdate)-dectime0+dt.days/2.0
-
-    msg = "DA Cycle start date is %s"   % startdate.strftime('%Y-%m-%d %H:%M')      ; logging.debug(msg)
-    msg = "DA Cycle end   date is %s"   % enddate.strftime('%Y-%m-%d %H:%M')        ; logging.debug(msg)
-
-    # Write/Create NetCDF output file
-    #
-    saveas          = os.path.join(dirname,'tcfluxes.nc')
-    ncf             = io.CT_CDF(saveas,'write')
-    dimdate         = ncf.AddDateDim()
-    dimidateformat  = ncf.AddDateDimFormat()
-    dimregs         = ncf.AddRegionDim(type='tc')
-#
-# set title and tell GMT that we are using "pixel registration"
-#
-    setattr(ncf,'Title','CarbonTracker TransCom fluxes')
-    setattr(ncf,'node_offset',1)
-    #
-
-    skip             = ncf.has_date(ncfdate)
-    if skip:
-        msg = 'Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'),saveas) ; logging.warning(msg)
-    else:
-
-        # Get input data
-
-        area=globarea()
-
-        infile=os.path.join(DaCycle['dir.analysis'],'data_state_weekly','statefluxes.nc')
-        if not os.path.exists(infile):
-            msg="Needed input file (%s) does not exist yet, please create file first, returning..."%infile ; logging.error(msg)
-            return None
-
-        ncf_in      = io.CT_Read(infile,'read')
-
-        # Transform data one by one
-
-        # Get the date variable, and find index corresponding to the DaCycle date
-
-        try:
-            dates       = ncf_in.variables['date'][:]
-        except KeyError:
-            msg         = "The variable date cannot be found in the requested input file (%s) " % infile ; logging.error(msg)
-            msg         = "Please make sure you create gridded fluxes before making TC fluxes " ; logging.error(msg)
-            raise KeyError
-
-        try:
-            index       = dates.tolist().index(ncfdate)
-        except ValueError:
-            msg         = "The requested cycle date is not yet available in file %s "% infile ; logging.error(msg)
-            msg         = "Please make sure you create state based fluxes before making TC fluxes " ; logging.error(msg)
-            raise ValueError
-
-        # First add the date for this cycle to the file, this grows the unlimited dimension
-
-        savedict            = ncf.StandardVar(varname='date')
-        savedict['values']  = ncfdate
-        savedict['dims']    = dimdate
-        savedict['count']   = index
-        dummy               = ncf.AddData(savedict)
-
-        # Now convert other variables that were inside the flux_1x1 file
-
-        vardict     = ncf_in.variables
-        for vname, vprop in vardict.iteritems():
-
-            data    = ncf_in.GetVariable(vname)[index]
-
-            savedict            = ncf.StandardVar(varname=vname)
-
-            if vname=='latitude': continue
-            elif vname=='longitude': continue
-            elif vname=='date': continue
-            elif vname=='idate': continue
-            elif 'cov' in vname:
-                
-                cov                 = data.transpose().dot(data) 
-                tcdata              = StateVector.VectorToTC(vectordata=cov,cov=True) # vector to TC
-
-                savedict['units']   = '[mol/region/s]**2'
-                savedict['dims']    = dimdate+dimregs+dimregs
-                
-            else:
-
-                tcdata              = StateVector.VectorToTC(vectordata=data) # vector to TC
-
-                savedict['dims']    = dimdate+dimregs
-                savedict['units']   = 'mol/region/s'
-
-            savedict['values']  = tcdata
-            ncf.AddData(savedict)
-
-        ncf_in.close()
-    ncf.close()
-
-    msg="TransCom weekly average fluxes now written" ; logging.info(msg)
-
-    return saveas
-
-def SaveTCDataExt(rundat):
-    """ Function SaveTCDataExt saves surface flux data to NetCDF files for extended TransCom regions
-        
-        *** Inputs ***
-        rundat : a RunInfo object
-
-        *** Outputs ***
-        NetCDF file containing n-hourly global surface fluxes per TransCom region
-
-        *** Example ***
-        ./expand_savestate project=enkf_release sd=20000101 ed=20010101 """
-    from da.analysis.tools_transcom import StateCovToGrid, transcommask, ExtendedTCRegions
-    import da.tools.io4 as io
-
-    infile=os.path.join(rundat.outputdir,'data_tc_weekly','tcfluxes.nc')
-    if not os.path.exists(infile):
-        print "Needed input file (%s) does not exist yet, please create weekly tc flux files first, returning..."%infile
-        return None
-
-    # Create NetCDF output file
-    #
-    saveas          = os.path.join(rundat.outputdir,'data_tc_weekly','tc_extfluxes.nc')
-    ncf             = io.CT_CDF(saveas,'create')
-    dimdate         = ncf.AddDateDim()
-    dimidateformat  = ncf.AddDateDimFormat()
-    dimregs         = ncf.AddRegionDim(type='tc_ext')
-
-
-    ncf_in      = io.CT_Read(infile,'read')
-    vardict     = ncf_in.variables
-    for vname, vprop in vardict.iteritems():
-
-        data    = ncf_in.GetVariable(vname)[:]
-
-        if vname=='latitude': continue
-        elif vname=='longitude': continue
-        elif vname=='date': dims=dimdate
-        elif vname=='idate': dims=dimdate+dimidateformat
-
-        if vname not in ['date','idate']:
-
-                if 'cov' in vname: 
-                    alldata=[] 
-                    for dd in data:
-                        dd=StateCovToGrid(dd*area,transcommask,reverse=True)
-                        alldata.append(dd)
-                    data=array(alldata) 
-                    dims=dimdate+dimregs+dimregs
-                else:
-                    data=ExtendedTCRegions(data)
-                    dims=dimdate+dimregs
-
-        print vname,data.shape
-        savedict            = ncf.StandardVar(varname=vname)
-        savedict['values']  = data.tolist()
-        savedict['dims']    = dims
-        savedict['units']   = 'mol/region/s'
-        savedict['count']   = 0
-        ncf.AddData(savedict,nsets=data.shape[0])
-         
-    ncf.close()
-
-    return saveas
-
-def SaveEcoDataExt(rundat):
-    """ Function SaveEcoDataExt saves surface flux data to NetCDF files for extended ecoregions
-        
-        *** Inputs ***
-        rundat : a RunInfo object
-
-        *** Outputs ***
-        NetCDF file containing n-hourly global surface fluxes per extended ecoregion
-
-        *** Example ***
-        ./expand_savestate project=enkf_release sd=20000101 ed=20010101 """
-    from ecotools import xte
-    from da.analysis.tools_transcom import LookUpName
-    import pycdf as CDF
-
-    infile=os.path.join(rundat.outputdir,'data_eco_weekly','ecofluxes.nc')
-    if not os.path.exists(infile):
-        print "Needed input file (%s) does not exist yet, please create weekly ecoflux files first, returning..."%infile
-        return None
-
-    # Create NetCDF output file
-    #
-    saveas=os.path.join(rundat.outputdir,'data_eco_weekly','eco_extfluxes.nc')
-    ncf=CT_CDF(saveas,'create')
-    dimdate=ncf.AddDateDim()
-    dimidateformat=ncf.AddDateDimFormat()
-    dimregs         = ncf.AddRegionDim(type='tc_ext')
-
-
-    ncf_in=CDF.CDF(infile)
-    vardict=ncf_in.variables()
-    for vname, vprop in vardict.iteritems():
-        dims=()
-        data=array(ncf_in.var(vname)[:])
-        atts=ncf_in.var(vname).attributes()
-        if vname not in ['date','idate']:
-                if 'cov' in vname: 
-                    data=xte(data[:,0:rundat.n_land,0:rundat.n_land],cov=True)
-                    dims=dimdate+dimregs+dimregs
-                else: 
-                    data=xte(data[:,0:rundat.n_land])
-                    dims=dimdate+dimregs
-        elif vname=='date': dims=dimdate
-        elif vname=='idate': dims=dimdate+dimidateformat
-        else:
-             print 'Dataset with unknown dimensions encountered in file: %s'%vname
-
-        savedict=ncf.StandardVar(varname=vname)
-        savedict['units']=atts['units']
-        savedict['values']=data.tolist()
-        savedict['dims']=dims
-        ncf.AddData(savedict,nsets=data.shape[0])
-         
-    ncf.close()
-
-    return saveas
-
-def SaveTimeAvgData(rundat,infile,avg='monthly'):
-    """ Function saves time mean surface flux data to NetCDF files
-        
-        *** Inputs ***
-        rundat : a RunInfo object
-
-        *** Outputs ***
-        NetCDF file containing time-averaged (monthly, seasonal, yearly, or longterm) global surface fluxes
-
-        *** Example ***
-        ./expand_savestate project=enkf_release sd=20000101 ed=20010101 """
-
-    import da.analysis.tools_time as timetools
-    import da.tools.io4 as io
-
-    dirname,filename    = os.path.split(infile)
-    dirname             = CreateDirs(os.path.join(rundat.outputdir,dirname.replace('weekly',avg) ) )
-
-    dectime0            = date2num(datetime(2000,1,1))
-
-# Create NetCDF output file
-#
-    saveas              = os.path.join(dirname,filename)
-    ncf                 = io.CT_CDF(saveas,'create')
-    dimdate             = ncf.AddDateDim()
-#
-# Open input file specified from the command line
-#
-    if not os.path.exists(infile):
-        print "Needed input file (%s) not found. Please create this first:"%infile
-        print "returning..."
-        return None 
-    else:
-        pass
-
-    file        = io.CT_Read(infile,'read')
-    datasets    = file.variables.keys()
-    date        = file.GetVariable('date')
-    globatts    = file.ncattrs()
-
-    time        = [datetime(2000,1,1)+timedelta(days=d) for d in date]
-
-#
-# Add global attributes, sorted 
-#
-
-    keys        = globatts
-    keys.sort()
-    for k in keys:
-        setattr(ncf,k,getattr(file,k))
-
-# loop over datasets in infile, skip idate and date as we will make new time axis for the averaged data
-
-    for sds in datasets:
-        if sds in ['idate','date'] : continue
-        print '['
-
-# get original data
-
-        data        = array(file.GetVariable(sds))
-        varatts     = file.variables[sds].ncattrs()
-        vardims     = file.variables[sds].dimensions
-#
-# Depending on dims of input dataset, create dims for output dataset. Note that we add the new dimdate now.
-#
-
-        dims=()
-        for d in vardims:
-            if 'latitude' in d: 
-                dimgrid     = ncf.AddLatLonDim()
-                dims        += (dimgrid[0],)
-            if 'longitude' in d: 
-                dimgrid     = ncf.AddLatLonDim()
-                dims        += (dimgrid[1],)
-            for type in ['eco','eco_ext','tc','tc_ext','olson']:
-                if 'regions_%s' % type == d: 
-                    dimregs     = ncf.AddRegionDim(type)
-                    dims        += dimregs
-#
-# If the variable name from the infile is not defined in the standard, simply copy attributes from the old to the new sds
-# Do not copy over the grid information
-#
-
-        if ncf.StandardVar(sds)['name'] == 'unknown':
-
-                savedict                    = ncf.StandardVar(sds)
-                savedict['name']            = sds
-                savedict['values']          = data.tolist()
-                savedict['dims']            = dims
-                savedict['units']           = file.variables[sds].units
-                savedict['long_name']       = file.variables[sds].long_name
-                savedict['comment']         = file.variables[sds].comment
-                savedict['standard_name']   = file.variables[sds].standard_name
-                savedict['count']           =0
-                ncf.AddData(savedict)
-                sys.stdout.write('.')
-                sys.stdout.flush()
-
-        else:
-
-            if sds in ['latitude','longitude']: continue
-
-            if avg == 'monthly':
-                time_avg, data_avg      = timetools.monthly_avg(time,data)
-            elif avg == 'seasonal':
-                time_avg, data_avg      = timetools.season_avg(time,data)
-            elif avg == 'yearly':
-                time_avg, data_avg      = timetools.yearly_avg(time,data)
-            elif avg == 'longterm':
-                time_avg, data_avg      = timetools.longterm_avg(time,data)
-                time_avg                = [time_avg]
-                data_avg                = [data_avg]
-            else:
-                raise ValueError,'Averaging (%s) does not exist' % avg
-
-            count=-1
-            for dd,data in zip(time_avg,data_avg):
-                count=count+1
-
-                if not ncf.has_date(date2num(dd)-dectime0): 
-
-                    savedict=ncf.StandardVar('date')
-                    savedict['values']=date2num(dd)-dectime0
-                    savedict['dims']=dimdate
-                    savedict['count']=count
-                    ncf.AddData(savedict)
-
-                savedict=ncf.StandardVar(sds)
-                savedict['values']=data.tolist()
-                savedict['units']=file.variables[sds].units
-                if 'cov' in sds: 
-                    savedict['dims']=dimdate+dims+dims
-                else:
-                    savedict['dims']=dimdate+dims
-                savedict['count']=count
-                ncf.AddData(savedict)
-
-                sys.stdout.write('.')
-                sys.stdout.flush()
-        print ']'
-
-
-# end NetCDF file access
-    file.close()
-    ncf.close()
-
-if __name__ == "__main__":
-
-    import logging
-    from da.tools.initexit import CycleControl
-    from da.ct.dasystem import CtDaSystem 
-    from da.ctgridded.statevector import CtGriddedStateVector 
-
-    sys.path.append('../../')
-
-    logging.root.setLevel(logging.DEBUG)
-
-    DaCycle = CycleControl(args={'rc':'../../dagridded.rc'})
-    DaCycle.Initialize()
-    DaCycle.ParseTimes()
-
-    DaSystem    = CtDaSystem('../rc/carbontrackergridded.rc')
-    DaSystem.Initialize()
-
-    DaCycle.DaSystem    = DaSystem
-
-    StateVector = CtGriddedStateVector(DaCycle)
-    dummy       = StateVector.Initialize()
-
-
-    no, yes, all = range(3)
-
-    proceed = no
-
-    # first, parse results from the inverse part of the run 
-    if proceed != all:
-        proceed = proceed_dialog('Create average 1x1 flux data files? [y|yes, n|no, a|all|yes-to-all ]')
-    if proceed != no:
-        while DaCycle['time.start'] < DaCycle['time.finish']:
-
-            savedas=SaveWeeklyAvg1x1Data(DaCycle, StateVector)
-            savedas=SaveWeeklyAvgStateData(DaCycle, StateVector)
-            savedas=SaveWeeklyAvgTCData(DaCycle, StateVector)
-
-            DaCycle.AdvanceCycleTimes()
-
-        sys.exit(2)
-
-        a=SaveTimeAvgData(rundat,savedas,avg='monthly')
-        a=SaveTimeAvgData(rundat,savedas,avg='yearly')
-        a=SaveTimeAvgData(rundat,savedas,avg='longterm')
-
-    if proceed != all:
-        proceed = proceed_dialog('Create average tc flux data files? [y|yes, n|no, a|all|yes-to-all] ')
-    if proceed != no:
-        a=SaveTimeAvgData(rundat,savedas,avg='monthly')
-        a=SaveTimeAvgData(rundat,savedas,avg='seasonal')
-        a=SaveTimeAvgData(rundat,savedas,avg='yearly')
-        a=SaveTimeAvgData(rundat,savedas,avg='longterm')
-    if proceed != all:
-        proceed = proceed_dialog('Create average extended tc flux data files? [y|yes, n|no, a|all|yes-to-all] ')
-    if proceed != no:
-        savedas=SaveTCDataExt(rundat)
-        a=SaveTimeAvgData(rundat,savedas,avg='monthly')
-        a=SaveTimeAvgData(rundat,savedas,avg='seasonal')
-        a=SaveTimeAvgData(rundat,savedas,avg='yearly')
-        a=SaveTimeAvgData(rundat,savedas,avg='longterm')
-    sys.exit(0)
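
Reviewer note on the deleted expand_fluxes.py above: the *_cov variables it wrote hold ensemble
deviations (the "matrix square root" mentioned in the in-code comments), not the covariance itself.
A minimal sketch of recovering the parameter covariance from such an array, assuming a deviations
matrix of shape (nmembers, nparameters) as written by SaveWeeklyAvgStateData (the sizes below are
illustrative only):

    import numpy as np

    nmembers, nparameters = 150, 240                  # illustrative sizes, not from the file
    deviations = np.random.randn(nmembers, nparameters)

    # Following the in-code comment: parameter covariance = deviations^T . deviations / (nmembers - 1)
    covariance = np.dot(deviations.T, deviations) / (nmembers - 1)

    # Per-parameter spread, comparable to the *_std variables written alongside
    std = np.sqrt(np.diag(covariance))
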
diff --git a/da/analysis/map_to_tc.pickle b/da/analysis/map_to_tc.pickle
deleted file mode 100644
index ccea75a3084680e9e29f56401cee418c6c991ee8..0000000000000000000000000000000000000000
Binary files a/da/analysis/map_to_tc.pickle and /dev/null differ
diff --git a/da/analysis/olson19_region_names b/da/analysis/olson19_region_names
deleted file mode 100644
index 15aa414598ccc35500a352118535cc4bb4fc5c38..0000000000000000000000000000000000000000
--- a/da/analysis/olson19_region_names
+++ /dev/null
@@ -1,19 +0,0 @@
- 1 CONIFOR  "Conifer Forest"
- 2 BRODLFOR "Broadleaf Forest: temperate, subtropical drought"
- 3 MIXEDFOR "Mixed Forest: conifer/broadleaf; so. Boreal"
- 4 GRASSHRB "Grassland +/- Shrub or Tree"
- 5 TROPICFR "Tropical/subtr. Forest: montane, seasonal, rainforest "
- 6 SCRUBWDS "Scrub +/- Woodland &/or fields (evergreen/decid.) "
- 7 SEMIDTUN "Semidesert shrub/steppe; Tundra (polar, alpine) "
- 8 FLDWDSAV "Field/Woods complex &/or Savanna, tallgrass "
- 9 NORTAIGA "Northern Boreal Taiga woodland/tundra"
-10 FORFDREV "Forest/Field; Dry Evergreen broadleaf woods "
-11 WETLAND  "Wetlands: mires (bog/fen); marsh/swamp +/- mangrove "
-12 DESERTS  "Desert: bare/alpine; sandy; salt/soda "
-13 SHRBTRST "Shrub/Tree: succulent/thorn "
-14 CROPSETL "Crop/Settlement (irrigated or not) "
-15 CONIFRFC "Conifer snowy Rainforest, coastal      "
-27 WTNDMHTH "Wooded Tundra Margin; Heath/moorland "
-19 MANGROVE "Mangrove/wet forest/thicket + tidal flat "
-25 ICEPLDE  "Ice and Polar Desert"
-19 WATER    "Water bodies"
diff --git a/da/analysis/postagg_definitions.nc b/da/analysis/postagg_definitions.nc
deleted file mode 100644
index 4f0a0c0f1b4a8aad977b086e35fd0b13aed5b174..0000000000000000000000000000000000000000
Binary files a/da/analysis/postagg_definitions.nc and /dev/null differ
diff --git a/da/analysis/regions.nc b/da/analysis/regions.nc
deleted file mode 100644
index c4372f35c08b588570eda8b7574590500a911770..0000000000000000000000000000000000000000
Binary files a/da/analysis/regions.nc and /dev/null differ
diff --git a/da/analysis/runinfo.py b/da/analysis/runinfo.py
deleted file mode 100755
index 02f87289e61012882b6963172106b61879c77adb..0000000000000000000000000000000000000000
--- a/da/analysis/runinfo.py
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/usr/bin/env python
-# runinfo.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 01 Mar 2011.
-
-"""
-
-import mysettings 
-import commands
-import os
-import sys
-from datetime import datetime, timedelta
-
-
-class RunInfo(object):
-    """ 
-    This is a CarbonTracker  run info object
-    Initialize it with the following keywords:
-   
-    MANDATORY: 
-    project  = Name of project, should correspond to a directory [string]
-    sd       = start date of project output [datetime object or integer yyyymmdd]
-    ed       = end date of project output [datetime object or integer yyyymmdd]
-    OPTIONAL:
-    basedir  = base directory of model output on your machine [string, default='~/Modeling/download']
-    outputdir= output directory for figures and summary files [string, default='~/Modeling/SEATA']
-
-    Tip: change the std_rundat dictionary in module filtertools to set new defaults for all values!
-    """
-    def __init__(self,infodict={},rcfile=None):
-        """ Initialize from the name of an rcfile, or by passing the dictionary directly """
-        import da.tools.rc as rc
-        import da.tools.io4 as io
-        import copy
-
-
-        if rcfile:
-            infodict    = rc.read(rcfile)
-
-        self.basedir    = infodict['dir.da_run']
-        self.proj       = os.path.split(self.basedir)[-1]
-        self.projdir    = self.basedir
-
-        self.dir_scheme = mysettings.ct_dir_scheme
-        
-        self.inputdir   = os.path.join(self.basedir,'output')
-        self.outputdir  = os.path.join(self.basedir,'analysis')
-
-        if not os.path.exists(self.outputdir): 
-            os.makedirs(self.outputdir)
-            print "Creating new output directory "+self.outputdir
- 
-        sd=infodict['time.start']
-        ed=infodict['time.finish']
-        dt=infodict['time.cycle']
-
-        self.dt = timedelta(days=int(dt))
-
-        if not isinstance(sd,datetime):
-            self.sd = datetime.strptime(sd,'%Y-%m-%d %H:%M:%S')
-        if not isinstance(ed,datetime):
-            self.ed = datetime.strptime(ed,'%Y-%m-%d %H:%M:%S')
-
-        dd             = copy.deepcopy(self.sd)
-        self.inputfiles= []
-        self.weeks     = []
-        self.inputdict = {}
-
-        while dd < self.ed:
-            filename = os.path.join(self.inputdir,'%s'%dd.strftime('%Y%m%d'),'savestate.nc')
-            if os.path.exists(filename):
-                self.inputfiles.append(filename)
-                self.weeks.append(dd)
-                self.inputdict[dd] = filename
-            else:
-                break
-                    
-            dd = dd+self.dt
-
-        self.ed     = dd
-        self.nweeks = len(self.weeks)
-
-        # run parameters from file
-        ncf             = io.CT_Read(self.inputfiles[0],'read')
-        self.nlag       = len(ncf.dimensions['nlag'])
-        self.nmembers   = len(ncf.dimensions['nmembers'])
-        self.nparameters  = len(ncf.dimensions['nparameters'])
-        ncf.close()
-
-        self.namingscheme = 'wp_Mar2011'
-        filename          = os.path.join('NamingScheme.'+self.namingscheme+'.rc')
-
-        try: 
-            rcinfo=rc.read(filename)
-        except IOError: 
-            print '%s was specified as naming scheme, but no matching %s rc-file was found, exiting...'%(self.namingscheme,filename,)
-            sys.exit(1)
-        except:
-            print 'Unknown error reading rc-file: %s, exiting...'%(filename,) 
-            sys.exit(1)
-
-
-        self.namedict=rcinfo
-    
-    def __str__(self):
-        return 'project              : '+self.projdir+\
-               '\nstart date           : '+str(self.sd)+\
-               '\nend date             : '+str(self.ed)+\
-               '\ndelta date           : '+str(self.dt)+\
-               '\nnweeks               : '+str(self.nweeks)+\
-               '\nnparameters          : '+str(self.nparameters)+\
-               '\nnmembers             : '+str(self.nmembers)+\
-               '\nnlag                 : '+str(self.nlag)+\
-               '\nDA output dir        : '+self.inputdir+\
-               '\nanalysis output dir  : '+self.outputdir+\
-               '\nnaming scheme        : '+self.namingscheme
-
-def get_rundat_settings(args):
-    """ create settings dict for rundat from scripts arguments """
-
-    settings=std_rundat.copy()  # start with defaults for rundat
-
-    for items in args:
-        items=items.lower()
-        k, v = items.split('=')
-        if settings.has_key(k):
-            if k in std_constructors:
-                settings[k] = std_constructors[k](v)
-            else:
-                settings[k] = v
-        else: raise IOError,'Parameter unknown:%s'%(v,)
-
-    return settings
-
-
-
-
-
-if __name__ == "__main__":
-    import getopt
-    import sys
-    from string import *
-
-    sys.path.append('../../')
-
-# Parse keywords
-
-    rundat=RunInfo(rcfile='../../da.rc')
-
-    print rundat
-
-    sys.exit(0)
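
Reviewer note on the deleted runinfo.py above: RunInfo could be built either from an rc file
(as in its __main__ block) or from a dictionary passed directly. A hedged sketch of the dictionary
route, using the keys read in __init__; the path and dates are placeholders, and the constructor
still expects the run's output/<yyyymmdd>/savestate.nc files to exist:

    from da.analysis.runinfo import RunInfo

    infodict = {
        'dir.da_run'  : '/scratch/ctdas/my_project',    # placeholder base directory of the run
        'time.start'  : '2000-01-01 00:00:00',          # parsed with '%Y-%m-%d %H:%M:%S'
        'time.finish' : '2001-01-01 00:00:00',
        'time.cycle'  : '7',                            # cycle length in days
    }

    rundat = RunInfo(infodict=infodict)
    print rundat
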
diff --git a/da/analysis/t3_region_names b/da/analysis/t3_region_names
deleted file mode 100644
index 0c8048b6a0b26b9f12a6db1bbdddeaf8e7076f0f..0000000000000000000000000000000000000000
--- a/da/analysis/t3_region_names
+++ /dev/null
@@ -1,45 +0,0 @@
- 1 T-NABR "North American Boreal"
- 2 T-NATM "North American Temperate"
- 3 T-SATR "South American Tropical"
- 4 T-SATM "South American Temperate"
- 5 T-NAFR "Northern Africa"
- 6 T-SAFR "Southern Africa"
- 7 T-EUBR "Eurasia Boreal"
- 8 T-EUTM "Eurasia Temperate"
- 9 T-ASTR "Tropical Asia"
-10 T-AUST "Australia"
-11 T-EURO "Europe"
-12 O-NPTM "North Pacific Temperate"
-13 O-WPTR "West Pacific Tropical"
-14 O-EPTR "East Pacific Tropical"
-15 O-SPTM "South Pacific Temperate"
-16 O-NOCN "Northern Ocean"
-17 O-NATM "North Atlantic Temperate"
-18 O-SATR "Atlantic Tropical"
-19 O-SATM "South Atlantic Temperate"
-20 O-SOCN "Southern Ocean"
-21 O-INTR "Indian Tropical"
-22 O-INTM "Indian Temperate"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/da/analysis/tc_land11_oif30.hdf b/da/analysis/tc_land11_oif30.hdf
deleted file mode 100644
index dc79019abb10abc403fa5850c06e05418f03401b..0000000000000000000000000000000000000000
Binary files a/da/analysis/tc_land11_oif30.hdf and /dev/null differ
diff --git a/da/analysis/tools_regions.py b/da/analysis/tools_regions.py
deleted file mode 100755
index 71cc14917ebbb86704c3375996d62460b7c99489..0000000000000000000000000000000000000000
--- a/da/analysis/tools_regions.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-import commands
-import os
-import sys
-from da.analysis.tools_transcom import *
-
-# Aggregated olson ecosystem regions for CT Europe
-
-aggregates=[\
-("Forests/Wooded" , (1, 2, 3, 5, 8, 10,) ) ,\
-("Grass/Shrubs" , (4, 6, 13) ) ,\
-("Crops/Agriculture", (14,) ) ,\
-("Tundra/Taiga" , (7,9,16) ) ,\
-("Coastal/Wet" , (11,15,17) ) ,\
-("Ice/Water/Deserts" , (12,18,19) ) \
-]
-
-agg_dict={}
-for k,v in aggregates:
-    agg_dict[k]=v
-
-ext_econams=[a for a,b in aggregates]
-ext_ecocomps=[b for a,b in aggregates]
-
-eco19_to_ecosums = zeros((19, 6),float)
-for i,k in enumerate(ext_ecocomps):
-    indices = [x-1 for x in k]
-    eco19_to_ecosums[:,i].put(indices,1.0)
-
-##### END OF REGION DEFINITIONS
-
-def StateToGrid(values,regionmap,reverse=False,avg=False):
-    """ 
-    This method converts parameters from a CarbonTracker StateVector object to a gridded map of linear multiplication values. These
-    can subsequently be used in the transport model code to multiply/manipulate fluxes
-
-    """
-    import numpy as np
-    nregions  = regionmap.max()
-
-    # dictionary for region <-> map conversions
-
-    regs={}
-    for r in np.arange(1,nregions+1):
-         sel=(regionmap.flat == r).nonzero()
-         if len(sel[0])>0: regs[r]=sel
-
-    regionselect=regs
-
-    if reverse:
-
-        """ project 1x1 degree map onto ecoregions """
-
-        result=np.zeros(nregions,float)
-        for k,v in regionselect.iteritems():
-            if avg: 
-                result[k-1]=values.ravel().take(v).mean()
-            else : 
-                result[k-1]=values.ravel().take(v).sum()
-        return result
-
-    else:
-
-        """ project ecoregion properties onto 1x1 degree map """
-
-        result=np.zeros((180,360,),float)
-        for k,v in regionselect.iteritems():
-            result.put(v,values[k-1])
-
-        return result
-
-def globarea(im=360,jm=180,silent=True):
-    """ Function calculates the surface area according to TM5 definitions"""
-    import numpy as np
-
-    radius      = 6.371e6  # the earth radius in meters
-    deg2rad     = np.pi/180.
-    g           = 9.80665 
-
-    dxx         = 360.0/im*deg2rad 
-    dyy         = 180.0/jm*deg2rad 
-    lat         = np.arange(-90*deg2rad,90*deg2rad,dyy)
-    dxy         = dxx*(np.sin(lat+dyy)-np.sin(lat))*radius**2
-    area        = np.resize(np.repeat(dxy,im,axis=0) ,[jm,im])
-    if not silent:
-        print 'total area of field = ',np.sum(area.flat)
-        print 'total earth area    = ',4*np.pi*radius**2
-    return area
-
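
Reviewer note on the deleted tools_regions.py above: a small sketch of how globarea and StateToGrid
were used together, aggregating a 1x1 degree flux field to per-region totals (reverse=True) and
projecting per-region scaling factors back onto the grid. The region map below is synthetic
(four latitude bands), purely for illustration:

    import numpy as np
    from da.analysis.tools_regions import globarea, StateToGrid

    area = globarea()                                   # (180, 360) grid-cell areas in m2

    # Synthetic region map with values 1..4, one per 45-degree latitude band
    regionmap = np.repeat(np.arange(1, 5), 45)[:, np.newaxis] * np.ones((180, 360), int)

    flux = np.ones((180, 360))                          # flux per m2, arbitrary units

    # 1x1 field -> per-region totals (sum over each region's cells)
    regional_totals = StateToGrid(flux * area, regionmap, reverse=True)

    # per-region scaling factors -> 1x1 grid
    scaling = np.array([0.9, 1.0, 1.1, 1.2])
    gridded = StateToGrid(scaling, regionmap)
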
diff --git a/da/analysis/tools_time.py b/da/analysis/tools_time.py
deleted file mode 100755
index 40b5a95adcded08019a35184810f7c102ddf4787..0000000000000000000000000000000000000000
--- a/da/analysis/tools_time.py
+++ /dev/null
@@ -1,334 +0,0 @@
-#! /usr/bin/env python
-from datetime import datetime, timedelta
-import calendar
-from pylab import floor
-from matplotlib.dates import date2num, num2date
-from numpy import array, zeros, newaxis
-
-def Fromdatetime(date):
-    dt=date.timetuple()
-    return datetime(*dt[0:6])
-
-def increase_time(dd,**kwargs):
-    """ Function increases the time by specified amount"""
-    return dd+timedelta(**kwargs)
-
-def chardate(dd,cut=8):
-    return dd.strftime('%Y%m%d%H%M')[0:cut]
-
-def timegen(sd,ed,dt):
-    dd=[]
-    while sd <= ed:
-        dd.append(Fromdatetime(sd))
-        sd=sd+dt
-    return dd
-
-def itau2datetime(itau,iyear0):
-    """ Function returns a datetime object from TM5s itau times"""
-    date0=datetime(iyear0,1,1,0,0,0)
-    if len(itau)==1:
-           itau=[itau] 
-    for time in itau:
-            sec=time%60
-            min=(time/60)%60
-            hrs=(time/3600)%24
-            day=(time/86400) 
-            dt=timedelta(days=day,hours=hrs,minutes=min,seconds=sec)
-            yield date0+dt 
-
-def date2dec(date):
-    """ Function converts datetime object to a Decimal number time  (e.g., 1991.875 such as in IDL, CCG) """
-    if not isinstance(date,list): date=[date]
-
-    newdate=[]
-    for dd in date:
-        Days0=date2num(datetime(dd.year,1,1))
-        if calendar.isleap(dd.year):
-            DaysPerYear=366.
-        else:
-            DaysPerYear=365.
-        DayFrac=date2num(dd)
-        newdate.append(dd.year+(DayFrac-Days0)/DaysPerYear)
-    if len(newdate) == 1: return newdate[0] 
-    return newdate
-
-def dec2date(dectime):
-    """ Function converts decimal time from year fraction (e.g., 1991.875 such as in IDL, CCG) to a python datetime object """
-    dt=num2date(dec2num(dectime)).timetuple()
-    return datetime(*dt[0:7])
-
-def dec2num(dectime):
-    """ Function converts decimal time from year fraction (e.g., 1991.875 such as in IDL, CCG) to a python decimal numtime """
-    if not isinstance(dectime,list): dectime=[dectime]
-
-    newdectime=[]
-    for dd in dectime:
-        yr=floor(dd)
-        Days0=date2num(datetime(int(yr),1,1))
-        if calendar.isleap(yr):
-            DaysPerYear=366.
-        else:
-            DaysPerYear=365.
-        DayFrac=(dd-yr)*DaysPerYear
-        newdectime.append(Days0+DayFrac)
-    if len(newdectime) == 1: return newdectime[0] 
-    return newdectime
-
-def num2dec(numtime):
-    """ Function converts python decimal numtime to an IDL decimal time """
-    res=date2dec(num2mydate(numtime))
-    return res
-
-def num2mydate(num):
-    """ Function converts decimal time from year fraction (e.g., 1991.875 such as in IDL, CCG) to a python datetime object """
-    dt=num2date(num).timetuple()
-    return datetime(*dt[0:7])
-
-def monthgen(sd,ed):
-    """ Generate sequence of datetime objects spaced by one month"""
-    from pylab import arange
-    if ed<sd: 
-        raise ValueError,'start date exceeds end date'
-        sys.exit(2)
-    dates=[]
-    for year in arange(sd.year,ed.year+2):
-        for month in arange(1,13):
-            date=datetime(year,month,1)
-            if date > ed: return dates
-            else: dates.append(date)
-
-def nextmonth(dd):
-    """ Find next 1st of the month following the date dd"""
-
-    if dd.month == 12: 
-        cc=dd.replace(year=dd.year+1)
-        ee=cc.replace(month=1)
-    else:
-        ee=dd.replace(month=dd.month+1)
-    ff = ee.replace(day=1)
-    return ff
-
-def in_interval(start,stop,times_in):
-    """ returns a list of fractions in time interval """
-    from numpy import logical_and, arange
-    from pylab import drange, num2date, date2num
-    from datetime import timedelta
-    import copy
-
-    times=copy.copy(times_in)
-
-    interval=times[1]-times[0]
-    times.append(times[-1]+interval)  # extend by one interval
-    times_filled=[times[0]+timedelta(days=d) for d in range((times[-1]-times[0]).days)]
-
-    b=[]
-    in_int=0.0
-    for t in times_filled:   # loop over days
-        if t in times[1:]:   # if new interval starts
-            b.append(in_int) # add previous aggregate to output
-            in_int=0.0         # reset counter
-        in_int+=int(logical_and(t >= start,t < stop))  # count if in interval [start,stop >
-    b.append(in_int)
-
-    if len(b) != len(times_in) : raise ValueError
-
-    return b
-
-def yearly_avg(time,data,sdev=False):
-    """ make monthly average from array using rundat and data"""
-
-    years=array([d.year for d in time])
-    
-    aa=[]
-    ss=[]
-    tt=[]
-    dd=time[0]
-    ed=time[-1]
-    while dd <= ed:
-        ddnext=datetime(dd.year+1,1,1)
-        weights=in_interval(dd,ddnext,time)
-        if len(weights) > 1:
-            weights=array(weights)
-            if weights.sum() > 0.0:
-                weights=weights/weights.sum()
-            else:
-                weights=weights
-
-            if weights.shape[0] != data.shape[0]:
-                raise ValueError,'yearly_avg has wrongly shaped weights (%d) for data of (%d)' %(weights.shape[0],data.shape[0])
-                 
-            sel=(weights != 0.0).nonzero()[0]     
-            #print sel,array(time).take(sel),dd,ddnext
-            if data.ndim == 1:
-                avg_data=(weights.take(sel)*data.take(sel,axis=0)).sum(axis=0)
-                std_data=(weights.take(sel)*data.take(sel,axis=0)).std(axis=0)
-            elif data.ndim == 2:
-                avg_data=(weights.take(sel)[:,newaxis]*data.take(sel,axis=0)).sum(axis=0).squeeze()
-                std_data=(weights.take(sel)[:,newaxis]*data.take(sel,axis=0)).std(axis=0).squeeze()
-            elif data.ndim == 3:
-                avg_data=(weights.take(sel)[:,newaxis,newaxis]*data.take(sel,axis=0)).sum(axis=0).squeeze()
-                std_data=(weights.take(sel)[:,newaxis,newaxis]*data.take(sel,axis=0)).std(axis=0).squeeze()
-            else:
-                raise ValueError,'yearly_avg takes 1, 2, or 3d arrays only'
-        elif len(weights)==1:    
-            avg_data=data[0]
-            std_data=0.0
-        else:
-            continue # next year
-
-        aa.append(avg_data)
-        ss.append(std_data)
-        tt.append(datetime(dd.year,6,15))
-
-        dd=ddnext
-
-    aa=array(aa).squeeze()
-    ss=array(ss).squeeze()
-    time=tt
-    if len(tt) == 1: 
-        aa=aa.reshape(1,*aa.shape)
-        ss=ss.reshape(1,*ss.shape)
-    if sdev: return time,aa, ss
-    else : return time,aa
-
-def monthly_avg(time,data,sdev=False):
-    """ make monthly average from array using rundat and data"""
-
-    years=array([d.year for d in time])
-    months=array([d.month for d in time])
-    
-    mm=[]
-    ss=[]
-    tt=[]
-    dd=time[0]
-    ed=time[-1]
-
-    while dd <= ed:
-        ddnext=nextmonth(dd)
-        weights=in_interval(dd,ddnext,time)
-        if len(weights) > 1:
-            weights=array(weights)
-            if weights.sum() > 0.0:
-                weights=weights/weights.sum()
-            else:
-                weights=weights
-
-            if weights.shape[0] != data.shape[0]:
-            raise ValueError,'monthly_avg has wrongly shaped weights (%d) for data of (%d)' %(weights.shape[0],data.shape[0])
-            
-            sel=(weights != 0.0).nonzero()[0]     
-            #print sel,array(time).take(sel),dd,nextmonth(dd)
-            if data.ndim == 1:
-                avg_data=(weights.take(sel)*data.take(sel,axis=0)).sum(axis=0)
-                std_data=(weights.take(sel)*data.take(sel,axis=0)).std(axis=0)
-            elif data.ndim == 2:
-                avg_data=(weights.take(sel)[:,newaxis]*data.take(sel,axis=0)).sum(axis=0).squeeze()
-                std_data=(weights.take(sel)[:,newaxis]*data.take(sel,axis=0)).std(axis=0).squeeze()
-            elif data.ndim == 3:
-                avg_data=(weights.take(sel)[:,newaxis,newaxis]*data.take(sel,axis=0)).sum(axis=0).squeeze()
-                std_data=(weights.take(sel)[:,newaxis,newaxis]*data.take(sel,axis=0)).std(axis=0).squeeze()
-            else:
-                raise ValueError,'monthly_avg takes 1, 2, or 3d arrays only'
-        elif len(weights)==1:    
-            avg_data=data[0]
-            std_data=0.0
-        else:
-            continue # next month
-
-        mm.append(avg_data)
-        ss.append(std_data)
-        tt.append(datetime(dd.year,dd.month,15))
-
-        dd=ddnext
-
-
-    mm=array(mm).squeeze()
-    ss=array(ss).squeeze()
-    time=tt
-
-    if len(tt) == 1: 
-        mm=mm.reshape(-1,*mm.shape)
-        ss=ss.reshape(-1,*ss.shape)
-
-    if sdev: return time,mm, ss
-    else : return time,mm
-
-def season_avg(time,data,sdev=False):
-    """ make season average from array using rundat and data"""
-
-    seas = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
-
-    mm=[]
-    ss=[]
-    tt=[]
-    dd=time[0]
-    ed=time[-1]
-
-    while dd <= ed:
-        ddmid=nextmonth(dd)
-        ddnext=nextmonth(nextmonth(nextmonth(dd)))
-        weights=in_interval(dd,ddnext,time)
-        if len(weights) > 1:
-            weights=array(weights)
-            if weights.sum() > 0.0:
-                weights=weights/weights.sum()
-            else:
-                weights=weights
-
-            if weights.shape[0] != data.shape[0]:
-            raise ValueError,'season_avg has wrongly shaped weights (%d) for data of (%d)' %(weights.shape[0],data.shape[0])
-            
-            sel=(weights != 0.0).nonzero()[0]     
-            #print sel,array(time).take(sel),dd,nextmonth(dd)
-            if data.ndim == 1:
-                avg_data=(weights.take(sel)*data.take(sel,axis=0)).sum(axis=0)
-                std_data=(weights.take(sel)*data.take(sel,axis=0)).std(axis=0)
-            elif data.ndim == 2:
-                avg_data=(weights.take(sel)[:,newaxis]*data.take(sel,axis=0)).sum(axis=0).squeeze()
-                std_data=(weights.take(sel)[:,newaxis]*data.take(sel,axis=0)).std(axis=0).squeeze()
-            elif data.ndim == 3:
-                avg_data=(weights.take(sel)[:,newaxis,newaxis]*data.take(sel,axis=0)).sum(axis=0).squeeze()
-                std_data=(weights.take(sel)[:,newaxis,newaxis]*data.take(sel,axis=0)).std(axis=0).squeeze()
-            else:
-                raise ValueError,'season_avg takes 1, 2, or 3d arrays only'
-        elif len(weights)==1:    
-            avg_data=data[0]
-            std_data=0.0
-        else:
-            continue # next month
-
-        mm.append(avg_data)
-        ss.append(std_data)
-        tt.append(datetime(ddmid.year,ddmid.month,15))
-
-        dd=ddnext
-
-
-    mm=array(mm).squeeze()
-    ss=array(ss).squeeze()
-    time=tt
-
-    if len(tt) == 1: 
-        mm=mm.reshape(-1,*mm.shape)
-        ss=ss.reshape(-1,*ss.shape)
-
-    if sdev: return time,mm, ss
-    else : return time,mm
-
-def longterm_avg(time,data):
-    """ Create long term mean """
-
-    time_avg = num2date(date2num(time).mean())
-    data_avg = data.mean(axis=0)
-
-    return time_avg, data_avg
-
-
-
-if __name__ == '__main__':
-    #print monthgen(datetime(2000,1,1),datetime(2006,5,1))
-    dd=datetime(2002,3,1)
-    print nextmonth(dd),dd
-
-        
-         
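
Reviewer note on the deleted tools_time.py above: a minimal usage sketch for the averaging helpers,
reducing a synthetic weekly series to monthly and yearly means (the time axis and data are made up
for illustration):

    from datetime import datetime, timedelta
    import numpy as np
    from da.analysis.tools_time import monthly_avg, yearly_avg

    # Synthetic weekly time axis with matching 1-d data
    time = [datetime(2000, 1, 1) + timedelta(days=7 * i) for i in range(52)]
    data = np.arange(52, dtype=float)

    months, monthly_means = monthly_avg(time, data)   # weighted mean per calendar month
    years, yearly_means   = yearly_avg(time, data)    # weighted mean per calendar year
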
diff --git a/da/analysis/tools_transcom.py b/da/analysis/tools_transcom.py
deleted file mode 100755
index d8ff9b6f9f26da51751428b31dae88ae813f30d6..0000000000000000000000000000000000000000
--- a/da/analysis/tools_transcom.py
+++ /dev/null
@@ -1,360 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-sys.path.append('../../')
-rootdir     = os.getcwd().split('da/')[0]
-analysisdir = os.path.join(rootdir,'da/analysis')
-
-from string import join, split
-from numpy import array, identity, zeros, arange, dot, transpose, sqrt, newaxis
-import da.tools.io4 as io
-
-# Get masks of different region definitions
-
-matrix_file     = os.path.join(analysisdir,'regions.nc')
-cdf_temp        = io.CT_CDF(matrix_file,'read')
-transcommask    = cdf_temp.GetVariable('transcom_regions')
-olson240mask    = cdf_temp.GetVariable('regions')
-olsonmask       = cdf_temp.GetVariable('land_ecosystems')
-oifmask         = cdf_temp.GetVariable('ocean_regions')
-dummy           = cdf_temp.close()
-
-# Names and short names of TransCom regions
-
-transshort  = []
-transnams   = []
-transland   = []
-temp        = open(os.path.join(analysisdir,'t3_region_names'),'r').readlines()
-for line in temp:
-    items   = line.split()
-    if items:
-        num,abbr,name=(items[0],items[1],join(items[2:]),)
-        transnams.append(name.strip('"'))
-        transshort.append(abbr)
-        if abbr.startswith('T'): transland.append(name.strip('"'))
-
-transnams.append("Non-optimized")
-transshort.append("I-NNOP")
-
-# Names and short names of Olson regions
-
-olsonnams   = []
-olsonshort  = []
-temp        = open(os.path.join(analysisdir,'olson19_region_names'),'r').readlines()
-for line in temp:
-    items   = line.split()
-    if items:
-        num,abbr,name=(items[0],items[1],join(items[2:]),)
-        olsonnams.append(name.strip('"'))
-        olsonshort.append(abbr)
-
-ext_transnams   = []
-ext_transshort  = []
-ext_transcomps  = []
-
-# Get names of aggregated regions for post aggregation
-
-matrix_file     = os.path.join(analysisdir,'postagg_definitions.nc')
-cdf_temp        = io.CT_CDF(matrix_file,'read')
-xform           = cdf_temp.GetVariable('xform')
-keys            = cdf_temp.ncattrs()
-
-keys.sort()
-for k in keys:
-    if 'longname' in k: 
-        ext_transnams.append(getattr(cdf_temp,k) )
-    if 'shortname' in k: 
-        ext_transshort.append(getattr(cdf_temp,k) )
-    if 'component' in k: 
-        ext_transcomps.append(map(int,getattr(cdf_temp,k).split(',')) )
-
-dummy           = cdf_temp.close()
-
-
-# Names of the ocean inversion flux regions, to go along with oifmask
-
-oifnams=['(1) NOCN Arctic Ocean',\
-         '(2) NAH  North Atlantic (49 - 76N)',\
-         '(3) NAM  North Atlantic (36 - 49N)',\
-         '(4) NAL  North Atlantic (18 - 36N)',\
-         '(5) NAT  North Atlantic ( 0 - 18N)',\
-         '(6) SAT  South Atlantic ( 0 - 18S)',\
-         '(7) SAL  South Atlantic (18 - 31S)',\
-         '(8) SAM  South Atlantic (31 - 44S)',\
-         '(9) SAH  South Atlantic (44 - 58S)',\
-         '(10) SOCN Southern Ocean (S of 58S)',\
-         '(11) NPHW North Pacific (N of 49N, W of 195E)',\
-         '(12) NPHE North Pacific (N of 36N, E of 195E)',\
-         '(13) NPK  North Pacific (Kuroshio Extension)',\
-         '(14) NPLW North Pacific (18N - K.Ext, W of 195E)',\
-         '(15) NPLE North Pacific (18 - 36N, E of 195E)',\
-         '(16) NPTW North Pacific ( 0 - 18N, W of 199E)',\
-         '(17) NPTE North Pacific ( 0 - 18N, E of 199E)',\
-         '(18) SPTW South Pacific ( 0 - 18S, W of 199E)',\
-         '(19) SPTE South Pacific ( 0 - 18S, E of 199E)',\
-         '(20) SPLW South Pacific (18 - 31S, W of 233E)',\
-         '(21) SPLE South Pacific (18 - 31S, E of 233E)',\
-         '(22) SPMW South Pacific (31 - 44S, W of 248E)',\
-         '(23) SPME South Pacific (31 - 44S, E of 248E, W of 278E)',\
-         '(24) SPMC South Pacific (31 - 44S, coastal E of 278E)',\
-         '(25) SPH  South Pacific (44 - 58S)              ',\
-         '(26) NI   North Indian',\
-         '(27) SIT  South Indian (0 - 18S)',\
-         '(28) SIL  South Indian (18 - 31S)',\
-         '(29) SIM  South Indian (31 - 44S)',\
-         '(30) SIH  South Indian (44 - 58S)']
-
-oiflocs=[ (200, 80,),\
-          (330, 55,),\
-          (330, 40,),\
-          (330, 22,),\
-          (330,  8,),\
-          (350,-12,),\
-          (350,-27,),\
-          (350,-40,),\
-          (350,-53,),\
-          (200,-70,),\
-          (178, 54,),\
-          (210, 40,),\
-          (165, 38,),\
-          (178, 25,),\
-          (215, 25,),\
-          (170, 8,),\
-          (230, 8,),\
-          (175,-10,),\
-          (240,-10,),\
-          (195,-27,),\
-          (265,-27,),\
-          (195,-40,),\
-          (262,-40,),\
-          (283,-40,),\
-          (220,-53,),\
-          (68,  8,),\
-          (75, -10,),\
-          (75, -27,),\
-          (75,-40,),\
-          (75, -53,)]
-
-
-translocs=[ (-177,0), \
-            (-92,53,),\
-            (-108,34,),\
-            (-66,4,),\
-            (-50,-17,),\
-            (15,17,),\
-            (26,-12,),\
-            (84,63,),\
-            (103,30,),\
-            (115,0,),\
-            (132,-25,),\
-            (9,50,),\
-            (-174,46,),\
-            (136,6,),\
-            (-108,6,),\
-            (-123,-15,),\
-            (-32,58,),\
-            (-32,38,),\
-            (-32,0,),\
-            (-32,-38,),\
-            (-14,-65,),\
-            (68,2,)]
-
-#olsonshort=[str(name.split()[1:2]).join('  ') for name in olsonnams]
-old_olsonshort=[join(split(name,' ')[1:2],' ') for name in olsonnams]
-
-olsonlabs=['Conifer Forest','Broadleaf Forest','Mixed Forest','Grass/Shrub','Tropical Forest','Scrub/Woods','Semitundra','Fields/Woods/\nSavanna',\
-     'Northern Taiga','Forest/Field','Wetland','Deserts','Shrub/Tree/\nSuc ','Crops','Conifer\n Snowy/Coastal',\
-     'Wooded tundra','Mangrove','Ice and \nPolar desert','Water'] 
-
-ecmwfnams=[ ' 1 CRPSMF Crops, mixed farming',\
-            ' 2 SHGRSS Short Grass',\
-            ' 3 EVNDLF Evergreen Needleleaf',\
-            ' 4 DECNDLF Deciduous Needleleaf',\
-            ' 5 EVBRDLF Evergreen Broadleaf',\
-            ' 6 DECBRLF Deciduous Broadleaf',\
-            ' 7 TLGRSS Tall Grass',\
-            ' 8 DES Desert',\
-            ' 9 TDR Tundra',\
-            '10 IRRCR Irrigated Crops',\
-            '11 SMDES Semidesert',\
-            '12 ICE Ice Caps',\
-            '13 BGM Bogs and Marshes',\
-            '14 INW Inland Water',\
-            '15 OCE Ocean',\
-            '16 EVSHRB Evergreen Shrubs',\
-            '17 DECSHR Deciduous shrubs',\
-            '18 MXFRST Mixed Forest',\
-            '19 INTFRST Interrupted Forest'] 
-
-ecmwfshort=[name.split()[1] for name in ecmwfnams]
-
-ecmwflabs=['Crops, mixed farming','Short Grass','Evergreen Needleleaf','Deciduous Needleleaf','Evergreen Broadleaf',\
-      'Deciduous Broadleaf','Tall Grass','Desert',\
-     'Tundra','Irrigated Crops','Semidesert','Ice Caps','Bogs and Marshes','Inland Water','Ocean',\
-     'Evergreen Shrubs','Deciduous shrubs','Mixed Forest','Interrupted Forest'] 
-
-a =array([\
-  0 ,0 ,0 ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,1  ,1  ,1  ,1  ,1  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0 ,\
-  0 ,0 ,0 ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,1  ,0  ,1  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0 ,\
-  0 ,0 ,0 ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,1  ,0  ,1  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0 ,\
-  0 ,0 ,0 ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,1  ,1  ,1  ,1  ,1  ,0  ,0  ,0  ,0  ,0  ,0 ,\
-  1 ,1 ,0 ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0 ,\
-  0 ,0 ,1 ,1  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0 ,\
-  0 ,0 ,0 ,0  ,1  ,1  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0 ,\
-  0 ,0 ,0 ,0  ,0  ,0  ,1  ,1  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0 ,\
-  0 ,0 ,0 ,0  ,0  ,0  ,0  ,0  ,1  ,1  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,1  ,0  ,0  ,0  ,0  ,1 ,\
-  0 ,0 ,0 ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,1  ,1  ,0  ,0  ,0 ,\
-  0 ,0 ,0 ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,0  ,1  ,1  ,0])
-O30to11     = a.reshape(11,30).transpose()
-
-O11to11     = identity(11)
-
-ntcland     = 11 # TC standard
-ntcocean    = 11 # TC standard
-
-Ols259_to_TC23  = zeros((259,23),float)
-Ols240_to_TC23  = zeros((240,23),float)
-Ols221_to_TC23  = zeros((221,23),float)
-for i in arange(ntcland): 
-    Ols259_to_TC23[i*19:(i+1)*19,i]=1.0
-
-Ols259_to_TC23[190:228,10]      = 1.0  # Europe
-Ols259_to_TC23[228:258,11:22]   = O30to11
-for i in arange(ntcland): Ols240_to_TC23[i*19:(i+1)*19,i]=1.0
-Ols240_to_TC23[209:239,11:22]   = O30to11
-for i in arange(ntcland): Ols221_to_TC23[i*19:(i+1)*19,i]=1.0
-Ols221_to_TC23[209:220,11:22]   = O11to11
-
-Ols221_to_TC23[220,22]=1.0
-Ols240_to_TC23[239,22]=1.0
-Ols259_to_TC23[258,22]=1.0
-
-
-ntcland     = 11 # TC standard
-ntcocean    = 11 # TC standard
-
-ExtendedTCRegionsFile='postagg_definitions.nc'
-
-def StateToTranscom(runinfo,x):
-    """ convert to transcom shaped regions"""
-    from numpy import dot
-
-    try:
-        nparams     = runinfo.nparameters 
-    except:
-        nparams     = runinfo
-    if nparams==240: 
-        M=Ols240_to_TC23
-    elif nparams==221: 
-        M=Ols221_to_TC23
-    elif nparams==259: 
-        M=Ols259_to_TC23
-    else:
-        raise ValueError('Do not know how to convert %s regions to 23 transcom regions'%(nparams,))
-
-    return dot(array(x).squeeze(),M)
-
-def StateCovToTranscom(runinfo,p):
-    """ convert to transcom shaped regions"""
-    try:
-        nparams     = runinfo.nparameters 
-    except:
-        nparams     = runinfo
-    if nparams==240: 
-        M=Ols240_to_TC23
-    elif nparams==221: 
-        M=Ols221_to_TC23
-    elif nparams==259: 
-        M=Ols259_to_TC23
-    else:
-        raise ValueError('Do not know how to convert %s regions to 23 transcom regions'%(nparams,))
-
-    return transpose(dot(dot(transpose(M),p),M),(1,0,2))
-
-def ExtendedTCRegions(data,cov=False):
-    """ convert to extended transcom shaped regions"""
-    import da.tools.io4 as io 
-    from numpy import dot
-
-    nparams     = data.shape[-1]
-    if nparams != 23 :
-        raise ValueError('Do not know how to convert %s regions to 37 extended transcom regions'%(nparams,))
-    M           = xform
-
-    if not cov:
-        return dot(array(data).squeeze(),M)
-    else:
-        return transpose(dot(transpose(M),dot(array(data).squeeze(),M)),(1,0,2))
-
-def cov2corr(A):
-    b=1./sqrt(A.diagonal())
-    return A*dot(b[:,newaxis],b[newaxis,:])
-
-def map_to_tc(data):
-    """ function projects 1x1 degree map onto TransCom regions by adding gridboxes over larger areas """
-    from hdf2field import Sds2field 
-    import cPickle
-    import os
-    from plottools import rebin
-
-    transcommapfile= 'tc_land11_oif30.hdf'
-    transcomconversionfile= 'map_to_tc.pickle'
-    try:
-        regionselect = cPickle.load(open(transcomconversionfile,'rb'))
-    except:
-        # read map from NetCDF
-        print '[in map_to_tc() in tctools.py:] '+ \
-              'Creating conversion map and pickle file for future quick use, patience please...'
-        map = Sds2field(transcommapfile,'tc_region')
-
-        # create dictionary for region <-> map conversions based on 1x1 map
-        
-        regs={}
-        nregions=map.max()
-        for r in arange(1,nregions+1):
-             sel=(map.flat == r).nonzero()
-             if len(sel[0])>0: regs[r]=sel
-        regionselect=regs
-        dummy = cPickle.dump(regionselect,open(transcomconversionfile,'wb'),-1)
-    
-    result=zeros(len(regionselect.keys()),float)
-    for k,v in regionselect.iteritems():
-        result[k-1]=data.ravel().take(v).sum()
-    return result
-
-def LookUpName(rundat,reg=33,tc=False,eco=False, olson=False, longnames=False):
-    """ return name of region number reg """
-
-    if longnames:
-        econames=olsonnams
-    else :
-        econames=olsonshort
-
-    if tc:
-        return (transnams[reg-1],)
-    elif eco:
-        if reg > rundat.nparameters:
-            raise IOError,'Region number exceeds definitions'
-        elif reg > rundat.n_land and reg != rundat.nparameters: 
-            ret=('Ocean', oifnams[reg-rundat.n_land-1])
-        elif reg > 209 and reg <= rundat.n_land:
-            ret=('Europe', econames[(reg-1)%19]+"_East")
-        elif reg == rundat.nparameters:
-            ret=(transnams[-1])
-        else:
-            ret=(transnams[(reg-1)/19], econames[(reg-1)%19])
-        return ret
-    elif olson:
-        return (econames[(reg-1)%19],)
-
-if __name__ == '__main__':
-    print transnams
-    print transshort
-    print ext_transnams
-    print ext_transshort
-    print olsonnams
-    print olsonshort
-    print ext_transcomps
-    
-
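The aggregation matrices defined above (Ols240_to_TC23 and friends) reduce a vector of ecoregion parameters to 23 TransCom regions through a plain matrix product, which is all StateToTranscom does. A minimal sketch with stand-in data (the random state and the simplified ocean block below are illustrative only)::

    import numpy as np

    nparams, nregions = 240, 23
    M = np.zeros((nparams, nregions))            # stand-in for Ols240_to_TC23
    for i in range(11):                          # 11 land regions x 19 ecosystems
        M[i * 19:(i + 1) * 19, i] = 1.0
    M[209:239, 11:22] = np.eye(30, 11)           # simplified stand-in for the ocean block
    M[239, 22] = 1.0                             # last parameter -> non-optimized

    x = 1.0 + 0.1 * np.random.randn(150, nparams)   # 150 ensemble members
    x_tc = np.dot(x, M)                              # shape (150, 23)
    print(x_tc.shape)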
diff --git a/da/baseclasses/__init__.py b/da/baseclasses/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/da/baseclasses/dasystem.py b/da/baseclasses/dasystem.py
deleted file mode 100755
index 881551bf07ab0c07009ec875bd9d4ed136ad4f59..0000000000000000000000000000000000000000
--- a/da/baseclasses/dasystem.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-# control.py
-
-"""
-.. module:: dasystem
-.. moduleauthor:: Wouter Peters 
-
-Revision History:
-File created on 26 Aug 2010.
-
-The DaSystem class is found in the module :mod:`dasystem`, or in a specific implementation under the da/ source tree. It is derived from the standard python :class:`dictionary` object. 
-
-It describes the details of the data assimilation system used (e.g., CarbonTracker, or CT Methane, ...) ::
-
-    datadir         : /Volumes/Storage/CO2/carbontracker/input/ct08/   ! The directory where input data is found
-    obs.input.dir   : ${datadir}/obsnc/with_fillvalue                  ! the observation input dir
-    obs.input.fname : obs_forecast.nc                                  ! the observation input file
-    ocn.covariance  : ${datadir}/oif_p3_era40.dpco2.2000.01.hdf        ! the ocean flux covariance file
-    bio.covariance  : ${datadir}/covariance_bio_olson19.nc             ! the biosphere flux covariance file
-    deltaco2.prefix : oif_p3_era40.dpco2                               ! the type of ocean product used
-    regtype         : olson19_oif30                                    ! the ecoregion definitions
-    nparameters     : 240                                              ! the number of parameters to solve for
-    random.seed     : 4385                                             ! the random seed for the first cycle
-    regionsfile     : transcom_olson19_oif30.hdf                       ! the ecoregion definition mask file
-
-    ! Info on the sites file used
-
-    obs.sites.rc        : ${datadir}/sites_and_weights_co2.ct10.rc     ! the weights in the covariance matrix of each obs
-
-The full baseclass description:
-
-.. autoclass:: da.baseclasses.dasystem.DaSystem
-   :members:
-
-"""
-
-import os
-import sys
-import logging
-import datetime
-
-################### Begin Class DaSystem ###################
-
-class DaSystem(dict):
-    """ 
-    Information on the data assimilation system used. This is normally an rc-file with settings.
-    """
-
-    def __init__(self, rcfilename):
-        """
-        Initialization occurs from passed rc-file name, items in the rc-file will be added
-        to the dictionary
-        """
-
-        self.Identifier = 'CarbonTracker CO2'    # the identifier gives the platform name
-        self.LoadRc(rcfilename)
-
-        logging.debug('Data Assimilation System initialized: %s' % self.Identifier)
-
-
-    def __str__(self):
-        """
-        String representation of a DaInfo object
-        """
-
-        msg = "==============================================================="    ; print msg
-        msg = "DA System Info rc-file is %s" % self.RcFileName                                ; print msg
-        msg = "==============================================================="    ; print msg
-
-        return ""
-
-
-    def LoadRc(self, RcFileName):
-        """ 
-        This method loads a DA System Info rc-file with settings for this simulation 
-        """
-        import da.tools.rc as rc 
-
-        for k, v in rc.read(RcFileName).iteritems():
-            self[k] = v
-        self.RcFileName = RcFileName
-        self.DaRcLoaded = True
-
-        logging.debug('DA System Info rc-file (%s) loaded successfully' % self.RcFileName)
-
-
-    def Initialize(self):
-        """ 
-        Initialize the object. 
-        """
-
-
-    def Validate(self):
-        """ 
-        Validate the contents of the rc-file given a dictionary of required keys
-        """
-
-        needed_rc_items = {}
-
-        for k, v in self.iteritems():
-            if v == 'True' : self[k] = True
-            if v == 'False': self[k] = False
-
-        for key in needed_rc_items:
-            if not self.has_key(key):
-                msg = 'Missing a required value in rc-file : %s' % key
-                logging.error(msg)
-                raise IOError, msg
-        logging.debug('DA System Info settings have been validated successfully')
-
-################### End Class DaSystem ###################
-
-
-if __name__ == "__main__":
-    pass
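As a quick illustration of the rc-format shown in the docstring above, a stripped-down parser that turns "key : value ! comment" lines into a dictionary could look as follows; the real parser lives in da.tools.rc and additionally handles variable substitution such as ${datadir}, which this sketch ignores::

    def parse_rc(lines):
        settings = {}
        for line in lines:
            line = line.split('!')[0].strip()        # drop trailing comments
            if not line or ':' not in line:
                continue
            key, value = line.split(':', 1)
            settings[key.strip()] = value.strip()
        return settings

    example = """
    datadir         : /input/ct08/            ! input directory
    nparameters     : 240                     ! number of parameters to solve for
    random.seed     : 4385                    ! random seed for the first cycle
    """
    print(parse_rc(example.splitlines()))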
diff --git a/da/baseclasses/obs.py b/da/baseclasses/obs.py
deleted file mode 100755
index 1dec17df1268ce998bb965a269489be7b3c35cd4..0000000000000000000000000000000000000000
--- a/da/baseclasses/obs.py
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/usr/bin/env python
-# obs.py
-
-"""
-.. module:: obs
-.. moduleauthor:: Wouter Peters 
-
-Revision History:
-File created on 28 Jul 2010.
-
-.. autoclass:: da.baseclasses.obs.Observation 
-   :members: Initialize, Validate, AddObs, AddSimulations, AddModelDataMismatch, WriteSampleInfo  
-
-.. autoclass:: da.baseclasses.obs.ObservationList 
-   :members: __init__
-
-"""
-import os
-import sys
-import logging
-import datetime
-from numpy import array, ndarray
-
-identifier = 'Observation baseclass'
-version = '0.0'
-
-################### Begin Class Observation ###################
-
-class Observation(object):
-    """ 
-    The baseclass Observation is a generic object that provides a number of methods required for any type of observations used in 
-    a data assimilation system. These methods are called from the CarbonTracker pipeline. 
-
-    .. note:: Most of the actual functionality will need to be provided through a derived Observations class with the methods 
-              below overwritten. Writing your own derived class for Observations is one of the first tasks you'll likely 
-              perform when extending or modifying the CarbonTracker Data Assimilation Shell.
-
-    Upon initialization of the class, an object is created that holds no actual data, but has a placeholder attribute `self.Data` 
-    which is an empty list of type :class:`~da.baseclasses.obs.ObservationList`. An ObservationList object is created when the 
-    method :meth:`~da.baseclasses.obs.Observation.AddObs` is invoked in the pipeline. 
-
-    From the list of observations, a file is written  by method 
-    :meth:`~da.baseclasses.obs.Observation.WriteSampleInfo`
-    with the sample info needed by the 
-    :class:`~da.baseclasses.observationoperator.ObservationOperator` object. The values returned after sampling 
-    are finally added by :meth:`~da.baseclasses.obs.Observation.AddSimulations`
-
-    """ 
-
-    def __init__(self, DaCycle=None):
-        """
-        create an object with an identifier, version, and an empty ObservationList
-        """
-        self.Identifier = self.getid()
-        self.Version = self.getversion()
-        self.Data = ObservationList([])  # Initialize with an empty list of obs
-
-        # The following code allows the object to be initialized with a DaCycle object already present. Otherwise, it can
-        # be added at a later moment.
-
-        if DaCycle != None:
-            self.DaCycle = DaCycle
-        else:
-            self.DaCycle = {}
-
-        logging.info('Observation object initialized: %s' % self.Identifier)
-
-    def getid(self):
-        """
-        Return the identifier of the Observation Object, used for logging purposes
-        """
-        return identifier
-
-    def getversion(self):
-        """
-        Return the version of the Observation Object, used for logging purposes
-        """
-        return version
-
-    def __str__(self):
-        """ Prints a list of Observation objects"""
-        return "This is a %s object, version %s" % (self.Identifier, self.Version)
-
-    def Initialize(self):
-        """ Perform all steps needed to start working with observational data, this can include moving data, concatenating files,
-            selecting datasets, etc.
-        """
-
-    def Validate(self):
-        """ Make sure that data needed for the ObservationOperator (such as observation input lists, or parameter files)
-            are present.
-        """
-
-    def AddObs(self):
-        """ 
-        Add actual observation data to the Observation object. This is in a form of an 
-        :class:`~da.baseclasses.obs.ObservationList` that is contained in self.Data. The 
-        list has as only requirement that it can return the observed+simulated values 
-        through the method :meth:`~da.baseclasses.obs.ObservationList.getvalues`
-
-        """
-
-    def AddSimulations(self):
-        """ Add the simulation data to the Observation object. 
-        """
-
-    def AddModelDataMismatch(self):
-        """ 
-            Get the model-data mismatch values for this cycle.
-
-        """
-
-    def WriteSampleInfo(self):
-        """ 
-            Write the information needed by the observation operator to a file. Return the filename that was written for later use
-        """
-
-
-
-################### End Class Observations ###################
-
-################### Begin Class ObservationList ###################
-
-class ObservationList(list):
-    """ This is a special type of list that holds observed sample objects. It has methods to extract data from such a list easily """
-
-    def getvalues(self, name, constructor=array):
-
-        result = constructor([getattr(o, name) for o in self])
-        if isinstance(result, ndarray): 
-            return result.squeeze()
-        else:
-            return result
-
-################### End Class ObservationList ###################
-
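To illustrate the intent of ObservationList.getvalues: each sample object carries attributes such as obs, simulated, and mdm, and getvalues collects one attribute across the whole list as a numpy array. MiniSample below is a hypothetical container used only for this sketch::

    import numpy as np

    class MiniSample(object):
        def __init__(self, obs, simulated, mdm):
            self.obs, self.simulated, self.mdm = obs, simulated, mdm

    class ObservationList(list):
        def getvalues(self, name, constructor=np.array):
            result = constructor([getattr(o, name) for o in self])
            return result.squeeze() if isinstance(result, np.ndarray) else result

    samples = ObservationList([MiniSample(380.1, 379.8, 0.5),
                               MiniSample(382.4, 382.9, 0.5)])
    print(samples.getvalues('obs'))        # -> [ 380.1  382.4]
    print(samples.getvalues('mdm'))        # -> [ 0.5  0.5]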
diff --git a/da/baseclasses/observationoperator.py b/da/baseclasses/observationoperator.py
deleted file mode 100755
index a5e9cb6366b550d4fe5afe942765ff74881bb577..0000000000000000000000000000000000000000
--- a/da/baseclasses/observationoperator.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-# model.py
-
-"""
-.. module:: observationoperator
-.. moduleauthor:: Wouter Peters 
-
-Revision History:
-File created on 30 Aug 2010.
-
-"""
-
-import os
-import sys
-import logging
-import datetime
-
-identifier = 'GeneralObservationOperator'
-version = '0.0'
-
-################### Begin Class ObservationOperator ###################
-class ObservationOperator(object):
-    """
-    This is a class that defines an ObservationOperator. This object is used to control the sampling of
-    a statevector in the ensemble Kalman filter framework. The methods of this class specify which (external) code
-    is called to perform the sampling, and which files should be read for input and which are written for output.
-
-    The baseclass consists mainly of empty methods that require an application-specific implementation.
-
-    """
-
-    def __init__(self, RcFileName, DaCycle=None):
-        """ The instance of an ObservationOperator is application dependent """
-        self.Identifier = self.getid()
-        self.Version = self.getversion()
-        self.RestartFileList = []
-        self.outputdir = None # Needed for opening the samples.nc files created 
-
-        self.LoadRc(RcFileName)   # load the specified rc-file
-        self.ValidateRc()         # validate the contents
-
-        logging.info('Observation Operator object initialized: %s' % self.Identifier)
-
-        # The following code allows the object to be initialized with a DaCycle object already present. Otherwise, it can
-        # be added at a later moment.
-
-        if DaCycle != None:
-            self.DaCycle = DaCycle
-        else:
-            self.DaCycle = {}
-
-    def getid(self):
-        return identifier
-
-    def getversion(self):
-        return version
-    
-    def GetInitialData(self):
-        """ This method places all initial data needed by an ObservationOperator in the proper folder for the model """
-
-        return None
-
-    def __str__(self):
-        return "This is a %s object, version %s" % (self.Identifier, self.Version)
-
-    def Initialize(self):
-        """ Perform all steps necessary to start the observation operator through a simple Run() call """
-
-    def ValidateInput(self):
-        """ Make sure that data needed for the ObservationOperator (such as observation input lists, or parameter files)
-            are present.
-        """
-    def SaveData(self):
-        """ Write the data that is needed for a restart or recovery of the Observation Operator to the save directory """
-
-
-
-################### End Class ObservationOperator ###################
-
-
-
-if __name__ == "__main__":
-    pass
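The baseclass above is meant to be subclassed, with the empty methods filled in by an application-specific operator. A minimal sketch of that pattern (DummyOperator and its constant background value are illustrative only and not part of CTDAS)::

    class ObservationOperator(object):
        """Stripped-down stand-in for the baseclass above."""
        def Initialize(self):
            pass
        def Run(self):
            raise NotImplementedError

    class DummyOperator(ObservationOperator):
        """Pretends to sample the model by returning a constant background."""
        def Initialize(self):
            self.background = 380.0            # ppm, illustrative only
        def Run(self):
            return [self.background + 0.1 * i for i in range(3)]

    op = DummyOperator()
    op.Initialize()
    print(op.Run())                            # -> [380.0, 380.1, 380.2]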
diff --git a/da/baseclasses/optimizer.py b/da/baseclasses/optimizer.py
deleted file mode 100755
index cd9d2ef4363022360b14ac4f77b4f19d31557e6b..0000000000000000000000000000000000000000
--- a/da/baseclasses/optimizer.py
+++ /dev/null
@@ -1,428 +0,0 @@
-#!/usr/bin/env python
-# optimizer.py
-
-"""
-.. module:: optimizer
-.. moduleauthor:: Wouter Peters 
-
-Revision History:
-File created on 28 Jul 2010.
-
-"""
-
-import os
-import sys
-import logging
-import datetime
-import numpy as np
-import numpy.linalg as la
-
-
-identifier = 'Optimizer baseclass'
-version = '0.0'
-
-################### Begin Class Optimizer ###################
-
-class Optimizer(object):
-    """
-        This creates an instance of an optimization object. It handles the minimum least squares optimization
-        of the state vector given a set of sample objects. Two routines will be implemented: one where the optimization
-        is sequential and one where it is the equivalent matrix solution. The choice can be made based on considerations of speed
-        and efficiency.
-    """
-
-    def __init__(self):
-        self.Identifier = self.getid()
-        self.Version = self.getversion()
-
-        logging.info('Optimizer object initialized: %s' % self.Identifier)
-
-    def getid(self):
-        return identifier
-
-    def getversion(self):
-        return version
-
-    def Initialize(self, dims):
-        self.nlag = dims[0]
-        self.nmembers = dims[1]
-        self.nparams = dims[2]
-        self.nobs = dims[3]
-        self.CreateMatrices()
-
-
-    def CreateMatrices(self):
-        """ Create Matrix space needed in optimization routine """
-        
-        # mean state  [X]
-        self.x = np.zeros((self.nlag * self.nparams,), float)
-        # deviations from mean state  [X']
-        self.X_prime = np.zeros((self.nlag * self.nparams, self.nmembers,), float)
-        # mean state, transported to observation space [ H(X) ]
-        self.Hx = np.zeros((self.nobs,), float)
-        # deviations from mean state, transported to observation space [ H(X') ]
-        self.HX_prime = np.zeros((self.nobs, self.nmembers), float)
-        # observations
-        self.obs = np.zeros((self.nobs,), float)
-        # covariance of observations
-        self.R = np.zeros((self.nobs, self.nobs,), float)
-        # localization of obs
-        self.may_localize = np.zeros(self.nobs, bool)
-        # rejection of obs
-        self.may_reject = np.zeros(self.nobs, bool)
-        # flags of obs
-        self.flags = np.zeros(self.nobs, int)
-
-        # Total covariance of fluxes and obs in units of obs [H P H^t + R]
-        self.HPHR = np.zeros((self.nobs, self.nobs,), float)
-        # Kalman Gain matrix
-        self.KG = np.zeros((self.nlag * self.nparams, self.nobs,), float)
-
-    def StateToMatrix(self, StateVector):
-        try:
-            import matplotlib.pyplot as plt
-        except:
-            pass
-
-        allobs = []      # collect all obs for n=1,..,nlag
-        allmdm = []      # collect all mdm for n=1,..,nlag
-        allsamples = []  # collect all model samples for n=1,..,nlag
-        allreject = []  # collect all rejection flags for n=1,..,nlag
-        alllocalize = []  # collect all localization flags for n=1,..,nlag
-        allflags = []  # collect all QC flags for n=1,..,nlag
-        allmemsamples = None  # collect all members model samples for n=1,..,nlag
-
-        for n in range(self.nlag):
-            members = StateVector.EnsembleMembers[n]
-            self.x[n * self.nparams:(n + 1) * self.nparams] = members[0].ParameterValues
-            self.X_prime[n * self.nparams:(n + 1) * self.nparams, :] = np.transpose(np.array([m.ParameterValues for m in members]))
-
-            if members[0].ModelSample != None:
-                self.rejection_threshold = members[0].ModelSample.rejection_threshold
-
-                allreject.extend(members[0].ModelSample.Data.getvalues('may_reject'))
-                alllocalize.extend(members[0].ModelSample.Data.getvalues('may_localize'))
-                allflags.extend(members[0].ModelSample.Data.getvalues('flag'))
-
-                allobs.extend(members[0].ModelSample.Data.getvalues('obs'))
-                allsamples.extend(members[0].ModelSample.Data.getvalues('simulated'))
-                allmdm.extend(members[0].ModelSample.Data.getvalues('mdm'))
-
-                memsamples = []
-                for mem in members:
-                    memsamples.append(mem.ModelSample.Data.getvalues('simulated')) 
-
-                if allmemsamples == None :
-                    allmemsamples = np.array(memsamples)
-                else:
-                    allmemsamples = np.concatenate((allmemsamples, np.array(memsamples)), axis=1)
-
-        self.HX_prime[:, :] = np.transpose(allmemsamples)
-
-        self.obs[:] = np.array(allobs)
-        self.Hx[:] = np.array(allsamples)
-
-        self.may_reject[:] = np.array(allreject)
-        self.may_localize[:] = np.array(alllocalize)
-        self.flags[:] = np.array(allflags)
-
-        self.X_prime = self.X_prime - self.x[:, np.newaxis] # make into a deviation matrix
-        self.HX_prime = self.HX_prime - self.Hx[:, np.newaxis] # make a deviation matrix
-
-        for i, mdm in enumerate(allmdm):
-            self.R[i, i] = mdm ** 2
-
-     
-
-    def MatrixToState(self, StateVector):
-        for n in range(self.nlag):
-            members = StateVector.EnsembleMembers[n]
-            for m, mem in enumerate(members):
-                members[m].ParameterValues[:] = self.X_prime[n * self.nparams:(n + 1) * self.nparams, m] + self.x[n * self.nparams:(n + 1) * self.nparams]     
-
-        StateVector.isOptimized = True
-        logging.debug('Returning optimized data to the StateVector, setting "StateVector.isOptimized = True" ')
-
-    def WriteDiagnostics(self, DaCycle, StateVector, type='prior'):
-        """
-            Open a NetCDF file and write diagnostic output from optimization process:
-
-                - calculated residuals
-                - model-data mismatches
-                - HPH^T
-                - prior ensemble of samples
-                - posterior ensemble of samples
-                - prior ensemble of fluxes
-                - posterior ensemble of fluxes
-
-            The type designation refers to the writing of prior or posterior data and is used in naming the variables.
-        """
-        import da.tools.io4 as io
-        #import da.tools.io as io
-
-        filename = os.path.join(DaCycle['dir.diagnostics'], 'optimizer.%s.nc' % DaCycle['time.start'].strftime('%Y%m%d'))
-        
-        #LU what is this line actually for?
-        DaCycle.OutputFileList += (filename,)
-
-        # Open or create file
-
-        if type == 'prior':
-            f = io.CT_CDF(filename, method='create')
-            logging.debug('Creating new diagnostics file for optimizer (%s)' % filename)
-        elif type == 'optimized':
-            f = io.CT_CDF(filename, method='write')
-            logging.debug('Opening existing diagnostics file for optimizer (%s)' % filename)
-
-        # Add dimensions 
-
-        dimparams = f.AddParamsDim(self.nparams)
-        dimmembers = f.AddMembersDim(self.nmembers)
-        dimlag = f.AddLagDim(self.nlag, unlimited=False)
-        dimobs = f.AddObsDim(self.nobs)
-        dimstate = f.AddDim('nstate', self.nparams * self.nlag)
-
-        # Add data, first the ones that are written both before and after the optimization
-        # x, X_prime, Hx, HX_prime
-
-        savedict = io.std_savedict.copy() 
-        savedict['name'] = "statevectormean_%s" % type
-        savedict['long_name'] = "full_statevector_mean_%s" % type
-        savedict['units'] = "unitless"
-        savedict['dims'] = dimstate
-        savedict['values'] = self.x.tolist()
-        savedict['comment'] = 'Full %s state vector mean ' % type
-        f.AddData(savedict)
-
-        savedict = io.std_savedict.copy()
-        savedict['name'] = "statevectordeviations_%s" % type
-        savedict['long_name'] = "full_statevector_deviations_%s" % type
-        savedict['units'] = "unitless"
-        savedict['dims'] = dimstate + dimmembers
-        savedict['values'] = self.X_prime.tolist()
-        savedict['comment'] = 'Full state vector %s deviations as resulting from the optimizer' % type
-        f.AddData(savedict)
-
-        savedict = io.std_savedict.copy()
-        savedict['name'] = "modelsamplesmean_%s" % type
-        savedict['long_name'] = "modelsamplesforecastmean_%s" % type
-        savedict['units'] = "mol mol-1"
-        savedict['dims'] = dimobs
-        savedict['values'] = self.Hx.tolist()
-        savedict['comment'] = '%s mean mixing ratios based on %s state vector' % (type, type,)
-        f.AddData(savedict)
-
-        savedict = io.std_savedict.copy()
-        savedict['name'] = "modelsamplesdeviations_%s" % type
-        savedict['long_name'] = "modelsamplesforecastdeviations_%s" % type
-        savedict['units'] = "mol mol-1"
-        savedict['dims'] = dimobs + dimmembers
-        savedict['values'] = self.HX_prime.tolist()
-        savedict['comment'] = '%s mixing ratio deviations based on %s state vector' % (type, type,)
-        f.AddData(savedict)
-
-        # Continue with prior only data: obs, R
-
-        if type == 'prior':
-            savedict = io.std_savedict.copy()
-            savedict['name'] = "observed"
-            savedict['long_name'] = "observedvalues"
-            savedict['units'] = "mol mol-1"
-            savedict['dims'] = dimobs
-            savedict['values'] = self.obs.tolist()
-            savedict['comment'] = 'Observations used in optimization'
-            f.AddData(savedict)
-
-            savedict = io.std_savedict.copy()
-            savedict['name'] = "modeldatamismatch"
-            savedict['long_name'] = "modeldatamismatch"
-            savedict['units'] = "[mol mol-1]^2"
-            savedict['dims'] = dimobs + dimobs
-            savedict['values'] = self.R.tolist()
-            savedict['comment'] = 'Variance of mole fractions resulting from model-data mismatch'
-            f.AddData(savedict)
-
-        # Continue with posterior only data: HPHR, flags, KG
-
-        elif type == 'optimized':
-            savedict = io.std_savedict.copy()
-            savedict['name'] = "molefractionvariance"
-            savedict['long_name'] = "molefractionvariance"
-            savedict['units'] = "[mol mol-1]^2"
-            savedict['dims'] = dimobs + dimobs
-            savedict['values'] = self.HPHR.tolist()
-            savedict['comment'] = 'Variance of mole fractions resulting from prior state and model-data mismatch'
-            f.AddData(savedict)
-
-            savedict = io.std_savedict.copy()
-            savedict['name'] = "flag"
-            savedict['long_name'] = "flag_for_obs_model"
-            savedict['units'] = "None"
-            savedict['dims'] = dimobs
-            savedict['values'] = self.flags.tolist()
-            savedict['comment'] = 'Flag (0/1/2/99) for observation value, 0 means okay, 1 means QC error, 2 means rejected, 99 means not sampled'
-            f.AddData(savedict)
-
-            savedict = io.std_savedict.copy()
-            savedict['name'] = "kalmangainmatrix"
-            savedict['long_name'] = "kalmangainmatrix"
-            savedict['units'] = "unitless molefraction-1"
-            savedict['dims'] = dimstate + dimobs
-            savedict['values'] = self.KG.tolist()
-            savedict['comment'] = 'Kalman gain matrix of all obs and state vector elements'
-            f.AddData(savedict)
-#LU not sure, but it should be like that
-        f.close()
-    
-    def SerialMinimumLeastSquares(self):
-        """ Make minimum least squares solution by looping over obs"""
-
-        tvalue = 1.97591
-        for n in range(self.nobs):
-            # Screen for flagged observations (for instance site not found, or no sample written from model)
-            if self.flags[n] != 0:
-                logging.debug('Skipping observation %d because of flag value %d' % (n, self.flags[n]))
-                continue
-
-            # Screen for outliers greater than 3x model-data mismatch, only apply if obs may be rejected
-            res = self.obs[n] - self.Hx[n]
-            
-            if self.may_reject[n]:
-                threshold = self.rejection_threshold * np.sqrt(self.R[n, n])
-                if np.abs(res) > threshold:
-                    logging.debug('Rejecting observation %d because residual (%f) exceeds threshold (%f)' % (n, res, threshold))
-                    self.flags[n] = 2
-                    continue
-
-            PHt = 1. / (self.nmembers - 1) * np.dot(self.X_prime, self.HX_prime[n, :])
-            self.HPHR[n, n] = 1. / (self.nmembers - 1) * (self.HX_prime[n, :] * self.HX_prime[n, :]).sum() + self.R[n, n]
-
-            self.KG[:, n] = PHt / self.HPHR[n, n]
-
-            if self.may_localize[n]:
-                dummy = self.Localize(n)
-                logging.debug('Localized observation %d' % (n,))
-            else:
-                logging.debug('Not allowed to Localize observation %d' % (n,))
-
-            alpha = np.double(1.0) / (np.double(1.0) + np.sqrt((self.R[n, n]) / self.HPHR[n, n]))
-
-            self.x[:] = self.x + self.KG[:, n] * res
-
-            for r in range(self.nmembers):
-                self.X_prime[:, r] = self.X_prime[:, r] - alpha * self.KG[:, n] * (self.HX_prime[n, r])
-
-#WP !!!! Very important to first do all observations from n=1 through the end, and only then update 1,...,n. The current observation
-#WP      should always be updated last because it features in the loop of the adjustments !!!!
-
-            for m in range(n + 1, self.nobs):
-                res = self.obs[n] - self.Hx[n]
-                fac = 1.0 / (self.nmembers - 1) * (self.HX_prime[n, :] * self.HX_prime[m, :]).sum() / self.HPHR[n, n]
-                self.Hx[m] = self.Hx[m] + fac * res
-                self.HX_prime[m, :] = self.HX_prime[m, :] - alpha * fac * self.HX_prime[n, :]
-
-            for m in range(1, n + 1):
-                res = self.obs[n] - self.Hx[n]
-                fac = 1.0 / (self.nmembers - 1) * (self.HX_prime[n, :] * self.HX_prime[m, :]).sum() / self.HPHR[n, n]
-                self.Hx[m] = self.Hx[m] + fac * res
-                self.HX_prime[m, :] = self.HX_prime[m, :] - alpha * fac * self.HX_prime[n, :]
-
-            
-    def BulkMinimumLeastSquares(self):
-        """ Make minimum least squares solution by solving matrix equations"""
-        # Create full solution, first calculate the mean of the posterior analysis
-
-        HPH = np.dot(self.HX_prime, np.transpose(self.HX_prime)) / (self.nmembers - 1)   # HPH = 1/N * HX' * (HX')^T
-        self.HPHR[:, :] = HPH + self.R                                                            # HPHR = HPH + R
-        HPb = np.dot(self.X_prime, np.transpose(self.HX_prime)) / (self.nmembers - 1)    # HP = 1/N X' * (HX')^T
-        self.KG[:, :] = np.dot(HPb, la.inv(self.HPHR))                                         # K = HP/(HPH+R)
-
-        for n in range(self.nobs):
-            dummy = self.Localize(n)
-
-        self.x[:] = self.x + np.dot(self.KG, self.obs - self.Hx)                             # xa = xp + K (y-Hx)
-
-        # And next make the updated ensemble deviations. Note that we calculate P by using the full equation (10) at once, and 
-        # not in a serial update fashion as described in Whitaker and Hamill. 
-        # For the current problem with limited N_obs this is easier, or at least more straightforward to do.
-
-        I = np.identity(self.nlag * self.nparams)
-        sHPHR = la.cholesky(self.HPHR)                                  # square root of HPH+R
-        part1 = np.dot(HPb, np.transpose(la.inv(sHPHR)))                 # HP(sqrt(HPH+R))^-1
-        part2 = la.inv(sHPHR + np.sqrt(self.R))                           # (sqrt(HPH+R)+sqrt(R))^-1
-        Kw = np.dot(part1, part2)                                     # K~
-        self.X_prime[:, :] = np.dot(I, self.X_prime) - np.dot(Kw, self.HX_prime)         # X' = X' - K~ * HX'
-
-        P_opt = np.dot(self.X_prime, np.transpose(self.X_prime)) / (self.nmembers - 1)
-
-        # Now do the adjustments of the modeled mixing ratios using the linearized ensemble. These are not strictly needed but can be used
-        # for diagnosis.
-
-        part3 = np.dot(HPH, np.transpose(la.inv(sHPHR)))                           # HPH(sqrt(HPH+R))^-1
-        Kw = np.dot(part3, part2)                                               # K~
-        self.Hx[:] = self.Hx + np.dot(np.dot(HPH, la.inv(self.HPHR)), self.obs - self.Hx)  # Hx  = Hx+ HPH/HPH+R (y-Hx)
-        self.HX_prime[:, :] = self.HX_prime - np.dot(Kw, self.HX_prime)                            # HX' = HX'- K~ * HX'
-
-        logging.info('Minimum Least Squares solution was calculated, returning')
-
-
-    def SetLocalization(self):
-        """ determine which localization to use """
-
-        self.localization = True
-        self.localizetype = "None"    
-        
-        logging.info("Current localization option is set to %s" % self.localizetype)
-
-    def Localize(self, n):
-        """ localize the Kalman Gain matrix """
-
-    #LU what is left to do here then? or is this just a baseclass stub
-        if not self.localization: return 
-
-        return
-
-################### End Class Optimizer ###################
-
-
-
-if __name__ == "__main__":
-
-    sys.path.append('../../')
-
-    import os
-    import sys
-    from da.tools.general import StartLogger 
-    from da.tools.initexit import CycleControl 
-    from da.ct.statevector import CtStateVector, PrepareState
-    from da.ct.obs import CtObservations
-    import numpy as np
-    import datetime
-    import da.tools.rc as rc
-
-    opts = ['-v']
-    args = {'rc':'../../da.rc', 'logfile':'da_initexit.log', 'jobrcfilename':'test.rc'}
-
-    StartLogger()
-    DaCycle = CycleControl(opts, args)
-
-    DaCycle.Initialize()
-    print DaCycle
-
-    StateVector = PrepareState(DaCycle)
-
-    samples = CtObservations(DaCycle.DaSystem, datetime.datetime(2005, 3, 5))
-    samples.AddObs()
-    samples.AddSimulations('/Users/peters/tmp/test_da/output/20050305/samples.000.nc')
-
-
-    nobs = len(samples.Data)
-    dims = (int(DaCycle['time.nlag']),
-                  int(DaCycle['da.optimizer.nmembers']),
-                  int(DaCycle.DaSystem['nparameters']),
-                  nobs,)
-
-    opt = Optimizer()
-    opt.Initialize(dims)
-    opt.StateToMatrix(StateVector)
-    opt.SerialMinimumLeastSquares()
-    opt.MatrixToState(StateVector)
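For a single observation, the serial update in SerialMinimumLeastSquares reduces to a handful of scalar operations on the ensemble. The numeric sketch below (all numbers illustrative only) mirrors those lines: PH^T and HPH^T+R from the deviations, the Kalman gain, and the square-root factor alpha used to shrink the deviations::

    import numpy as np

    nmembers = 5
    x = np.array([1.0])                                # prior state mean
    X_prime = np.array([[-0.2, -0.1, 0.0, 0.1, 0.2]])  # deviations (nparams, nmembers)
    Hx = 380.0                                         # modeled value for this obs
    HX_prime = 5.0 * X_prime[0]                        # sampled deviations (nmembers,)
    obs, R = 381.0, 0.5 ** 2                           # observation and its error variance

    PHt = np.dot(X_prime, HX_prime) / (nmembers - 1)           # P H^T
    HPHR = (HX_prime * HX_prime).sum() / (nmembers - 1) + R    # H P H^T + R
    KG = PHt / HPHR                                            # Kalman gain
    alpha = 1.0 / (1.0 + np.sqrt(R / HPHR))                    # square-root filter factor

    x_post = x + KG * (obs - Hx)                               # update the mean
    X_prime_post = X_prime - alpha * np.outer(KG, HX_prime)    # shrink the deviations
    print(x_post, X_prime_post.std())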
diff --git a/da/baseclasses/platform.py b/da/baseclasses/platform.py
deleted file mode 100755
index 93f8279206b96586deac9ae4adb3a5e654ac7c2a..0000000000000000000000000000000000000000
--- a/da/baseclasses/platform.py
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/env python
-# jobcontrol.py
-
-"""
-.. module:: platform
-.. moduleauthor:: Wouter Peters 
-
-Revision History:
-File created on 06 Sep 2010.
-
-The PlatForm class is found in the module :mod:`platform`, or in a specific implementation under the da/source tree. 
-
-The platform object holds attributes and methods that allow job control on each specific platform. This includes methods to create and submit jobs, but also to obtain process and/or job ID's. These are needed to control the flow of 
-the system on each platform.
-
-Typically, every platform needs specific implementations of this object (through inheritance), and you should refer to your specific PlatForm object documentation for details (see *da/platform/*).
-
-.. autoclass:: da.baseclasses.platform.PlatForm
-   :members:
-   :inherited-members:
-
-"""
-
-import sys
-import os
-import logging
-import subprocess
-
-std_joboptions = {'jobname':'test', 'jobaccount':'co2', 'jobnodes':'nserial 1', 'jobshell':'/bin/sh', 'depends':'', 'jobtime':'00:30:00'}
-
-class PlatForm(object):
-    """ 
-    This specifies platform dependent options under generic object calls. A platform object is used to control and submit jobs
-    """
-
-    def __init__(self):
-        """
-        The init function reports the hard-coded ``Identifier`` and ``Version`` of the PlatForm. Since each new
-        computer/user requires their own PlatForm object modifications, the init function is usually overwritten
-        in the specific implementation of this class
-        """
-        self.Identifier = 'iPad'    # the identifier gives the platform name
-        self.Version = '1.0'     # the platform version used
-
-        msg1 = '%s object initialized' % self.Identifier ; logging.debug(msg1)
-        msg2 = '%s version: %s' % (self.Identifier, self.Version) ; logging.debug(msg2)
-
-
-    def __str__(self):
-        return self.Version
-
-    def GetJobTemplate(self, joboptions={}, block=False):
-        """ 
-        Returns the job template for a given computing system, and fills it with options from the dictionary provided as argument.
-        The job template should return the preamble of a job that can be submitted to a queue on your platform;
-        examples of popular queuing systems are:
-            - SGE
-            - MOAB
-            - XGrid
-
-        A list of job options can be passed through a dictionary, which are then filled in on the proper line,
-        an example is for instance passing the dictionary {'account':'co2'} which will be placed 
-        after the ``-A`` flag in a ``qsub`` environment.
-
-        An extra option ``block`` has been added that allows the job template to be configured to block the current
-        job until the submitted job in this template has been completed fully.
-        """
-
-        template = """## \n""" + \
-                   """## This is a set of dummy names, to be replaced by values from the dictionary \n""" + \
-                   """## Please make your own platform specific template with your own keys and place it in a subfolder of the da package.\n """ + \
-                   """## \n""" + \
-                   """ \n""" + \
-                   """#$ jobname \n""" + \
-                   """#$ jobaccount \n""" + \
-                   """#$ jobnodes \n""" + \
-                   """#$ jobtime \n""" + \
-                   """#$ jobshell \n """
-
-        if 'depends' in joboptions:
-            template += """#$ -hold_jid depends \n"""
-
-        # First replace from passed dictionary
-        for k, v in joboptions.iteritems():
-            while k in template:
-                template = template.replace(k, v)
-
-        # Fill remaining values with std_options
-        for k, v in std_joboptions.iteritems():
-            while k in template:
-                template = template.replace(k, v)
-
-        return template
-
-    def GetMyID(self):
-        """ Return the process ID, or job ID of the current process or job"""
-        return os.getpid()
-
-    def WriteJob(self, jobfile, template, jobid):                   
-        """ 
-        This method writes a jobfile to the exec dir and makes it executable (mod 477)
-        """
-        file = open(jobfile, 'w')
-        file.write(template)
-        file.close()
-        os.chmod(jobfile, 477)                  
-
-        logging.debug("A job file was created (%s)" % jobfile)
-        
-
-    def SubmitJob(self, jobfile, joblog=None, block=False): 
-        """ 
-           :param jobfile: a string with the filename of a jobfile to run
-           :param joblog:  a string with the filename of a logfile to write run output to
-           :param block:  Boolean specifying whether to submit and continue (F), or submit and wait (T)
-           :rtype: integer
-
-        This method submits a jobfile to the queue, and returns the job ID 
-        """
-        from string import join
-       
-        cmd = ["sh", jobfile]
-        logging.info("A new task will be started (%s)" % cmd)
-        if block:
-            jobid = subprocess.call(cmd)
-        else:
-            jobid = subprocess.Popen(cmd).pid
-
-# info ...
-        infotext = []
-        infotext.append('\n')
-        infotext.append('Summary:\n')
-        infotext.append('\n')
-        infotext.append('job script      : %s\n' % jobfile)
-        infotext.append('job log         : %s\n' % joblog)
-        infotext.append('\n')
-        infotext.append('To manage this process:\n')
-        infotext.append('\n')
-        infotext.append('  # kill process:\n')
-        infotext.append('  kill %i\n' % jobid)
-        infotext.append('  \n')
-        infotext.append('\n')
-        # write to log:
-        for line in infotext : logging.info(line.strip())
-
-
-    def KillJob(self, jobid):                   
-        """ This method kills a running job """
-        return None
-
-    def StatJob(self, jobid):                   
-        """ This method gets the status of a running job """
-        import subprocess
-        output = subprocess.Popen(['qstat', jobid], stdout=subprocess.PIPE).communicate()[0]  ; logging.info(output)
-        return output
-
-
-if __name__ == "__main__":
-    pass
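The template substitution performed by GetJobTemplate is a straightforward keyword replacement: keys passed by the caller are substituted first, and anything left falls back to std_joboptions. A simplified stand-in of that behaviour (not the platform-specific templates shipped with CTDAS)::

    std_joboptions = {'jobname': 'test', 'jobnodes': 'nserial 1', 'jobtime': '00:30:00'}

    def fill_template(template, joboptions):
        for k, v in joboptions.items():
            template = template.replace(k, v)
        for k, v in std_joboptions.items():      # fill whatever the caller did not set
            template = template.replace(k, v)
        return template

    template = "#$ jobname\n#$ jobnodes\n#$ jobtime\n"
    print(fill_template(template, {'jobname': 'ctdas-cycle-1'}))
    # -> "#$ ctdas-cycle-1", "#$ nserial 1", "#$ 00:30:00"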
diff --git a/da/baseclasses/statevector.py b/da/baseclasses/statevector.py
deleted file mode 100755
index 4c0e4a515f13575414c50b373af5ea658981ff52..0000000000000000000000000000000000000000
--- a/da/baseclasses/statevector.py
+++ /dev/null
@@ -1,656 +0,0 @@
-#!/usr/bin/env python
-# ct_statevector_tools.py
-
-"""
-.. module:: statevector
-.. moduleauthor:: Wouter Peters 
-
-Revision History:
-File created on 28 Jul 2010.
-
-The module statevector implements the data structure and methods needed to work with state vectors (a set of unknown parameters to be optimized by a DA system) of different lengths, types, and configurations. Two baseclasses together form a generic framework:
-    * :class:`~da.baseclasses.statevector.StateVector`
-    * :class:`~da.baseclasses.statevector.EnsembleMember`
-
-As usual, specific implementations of StateVector objects are done through inheritance from these baseclasses. For an example of designing 
-your own StateVector subclass, we refer to :ref:`tut_chapter5`.
-
-.. autoclass:: da.baseclasses.statevector.StateVector 
-
-.. autoclass:: da.baseclasses.statevector.EnsembleMember 
-
-"""
-
-import os
-import sys
-import logging
-import numpy as np
-import da.tools.io4 as io
-import datetime
-
-identifier = 'Baseclass Statevector '
-version = '0.0'
-
-################### Begin Class EnsembleMember ###################
-
-class EnsembleMember(object):
-    """ 
-        An ensemble member object consists of:
-           * a member number
-           * parameter values
-           * an observation object to hold sampled values for this member
-
-        Ensemble members are initialized by passing only an ensemble member number, all data is added by methods 
-        from the :class:`~da.baseclasses.statevector.StateVector`. Ensemble member objects have almost no functionality 
-        except to write their data to file using method :meth:`~da.baseclasses.statevector.EnsembleMember.WriteToFile`
-
-        .. automethod:: da.baseclasses.statevector.EnsembleMember.__init__ 
-        .. automethod:: da.baseclasses.statevector.EnsembleMember.WriteToFile 
-        .. automethod:: da.baseclasses.statevector.EnsembleMember.AddCustomFields 
-
-    """
-
-    def __init__(self, membernumber):
-        """
-           :param membernumber: integer ensemble member number
-           :rtype: None
-
-           An EnsembleMember object is initialized with only a number, and holds two attributes as containers for later
-           data:
-                * ParameterValues, will hold the actual values of the parameters for this member
-                * ModelSample, will hold an :class:`~da.baseclasses.obs.Observation` object and the model samples resulting from this member's data
-
-        """
-
-        self.membernumber = membernumber   # the member number
-        self.ParameterValues = None           # Parameter values of this member
-        self.ModelSample = None           # Model Sampled Parameter values of this member
-
-    def __str__(self):
-        return "%03d" % self.membernumber
-
-################### End Class EnsembleMember ###################
-
-################### Begin Class StateVector ###################
-
-class StateVector(object):
-    """ 
-    The StateVector object first of all contains the data structure of a statevector, defined by 3 attributes that define the 
-    dimensions of the problem in parameter space:
-        * nlag
-        * nparameters
-        * nmembers
-
-    The fourth important dimension `nobs` is not related to the StateVector directly but is initialized to 0, and later on 
-    modified to be used in other parts of the pipeline:
-        * nobs
-
-    These values are set as soon as the :meth:`~da.baseclasses.statevector.StateVector.Initialize` is called from the :ref:`pipeline`. 
-    Additionally, the value of attribute `isOptimized` is set to `False` indicating that the StateVector holds a-priori values 
-    and has not been modified by the :ref:`optimizer`.
-
-    StateVector objects can be filled with data in two ways
-        1. By reading the data from file
-        2. By creating the data through a set of method calls
-
-    Option (1) is invoked using method :meth:`~da.baseclasses.statevector.StateVector.ReadFromFile`. 
-    Option (2) consists of a call to method :meth:`~da.baseclasses.statevector.StateVector.MakeNewEnsemble`
-
-    Each option makes use of the same call to :meth:`~da.baseclasses.statevector.StateVector.GetNewMember`, to create a 
-    data container to be filled: the :class:`~da.baseclasses.statevector.EnsembleMember`.
-
-    Once the StateVector object has been filled with data, it is used in the pipeline and a few more methods are
-    invoked from there:
-        * :meth:`~da.baseclasses.statevector.StateVector.Propagate`, to advance the StateVector from t=t to t=t+1
-        * :meth:`~da.baseclasses.statevector.StateVector.WriteToFile`, to write the StateVector to a NetCDF file for later use
-
-    The methods are described below:
-
-    .. automethod:: da.baseclasses.statevector.StateVector.Initialize 
-    .. automethod:: da.baseclasses.statevector.StateVector.ReadFromFile
-    .. automethod:: da.baseclasses.statevector.StateVector.WriteToFile
-    .. automethod:: da.baseclasses.statevector.StateVector.MakeNewEnsemble
-    .. automethod:: da.baseclasses.statevector.StateVector.GetNewMember
-    .. automethod:: da.baseclasses.statevector.StateVector.Propagate
-    .. automethod:: da.baseclasses.statevector.StateVector.WriteMembersToFile
-
-    Finally, the StateVector can be mapped to a gridded array, or to a vector of TransCom regions, using:
-
-    .. automethod:: da.baseclasses.statevector.StateVector.GridToVector
-    .. automethod:: da.baseclasses.statevector.StateVector.VectorToGrid
-    .. automethod:: da.baseclasses.statevector.StateVector.VectorToTC
-    .. automethod:: da.baseclasses.statevector.StateVector.StateToTC
-
-    """
-
-    def __init__(self, DaCycle=None):
-        self.Identifier = self.getid()
-        self.Version = self.getversion()
-
-        # The following code allows the object to be initialized with a DaCycle object already present. Otherwise, it can
-        # be added at a later moment.
-
-        if DaCycle is not None:
-            self.DaCycle = DaCycle
-        else:
-            self.DaCycle = {}
-
-        logging.info('Statevector object initialized: %s' % self.Identifier)
-
-    def getid(self):
-        return identifier
-
-    def getversion(self):
-        return version
-
-    def Initialize(self):
-        """
-        Initialize the object by specifying the dimensions. 
-        There are two major requirements for each statevector that you want to build:
-        
-            (1) the statevector must be able to map itself onto a regular grid
-            (2) the statevector must be able to map itself (mean+covariance) onto TransCom regions
-
-        An example is given below.
-        """
-        dims = (int(self.DaCycle['time.nlag']),
-                            int(self.DaCycle['da.optimizer.nmembers']),
-                            int(self.DaCycle.DaSystem['nparameters']),
-                           )
-
-
-        self.nlag = dims[0]
-        self.nmembers = dims[1]
-        self.nparams = dims[2]
-        self.nobs = 0
-        self.isOptimized = False
-
-        # These list objects hold the data for each time step of lag in the system. Note that the ensembles for each time step consist 
-        # of lists of EnsembleMember objects; we define member 0 as the mean of the distribution and n=1,...,nmembers as the spread.
-
-        self.EnsembleMembers = [[] for n in range(self.nlag)]
-
-
-        # This specifies the file to read with the gridded mask at 1x1 degrees. Each gridbox holds a number that specifies the parameter
-        # that maps onto it. From this map, a dictionary is created that allows a reverse look-up so that we can map parameters to a grid.
-
-        mapfile = self.DaCycle.DaSystem['regionsfile']
-        ncf = io.CT_Read(mapfile, 'read')
-        self.gridmap = ncf.GetVariable('regions')
-        self.tcmap = ncf.GetVariable('transcom_regions')
-        ncf.close()
-
-        logging.debug("A TransCom  map on 1x1 degree was read from file %s" % self.DaCycle.DaSystem['regionsfile'])
-        logging.debug("A parameter map on 1x1 degree was read from file %s" % self.DaCycle.DaSystem['regionsfile'])
-
-        # Create a dictionary for state <-> gridded map conversions
-
-        nparams = int(self.gridmap.max())
-        self.griddict = {}
-        for r in range(1, nparams + 1):
-             sel = (self.gridmap.flat == r).nonzero()
-             if len(sel[0]) > 0: 
-                 self.griddict[r] = sel
-
-        logging.debug("A dictionary to map grids to states and vice versa was created")
-
-        # Create a matrix for state <-> TransCom conversions
-
-        self.tcmatrix = np.zeros((self.nparams, 23,), 'float') 
-
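-        # Row r-1 of tcmatrix receives a single 1.0 in the column of the (unique) TransCom
-        # region that parameter r falls in; parameters without any grid boxes are skipped.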
-        for r in range(1, self.nparams + 1):
-             sel = (self.gridmap.flat == r).nonzero()
-             if len(sel[0]) < 1: 
-                 continue
-             else:
-                 n_tc = set(self.tcmap.flatten().take(sel[0]))
-                 if len(n_tc) > 1: 
-                     msg = "Parameter %d seems to map to multiple TransCom regions (%s), I do not know how to handle this" % (r, n_tc) ;logging.error(msg) 
-                     raise ValueError
-                 self.tcmatrix[r - 1, n_tc.pop() - 1] = 1.0
-
-        msg = "A matrix to map states to TransCom regions and vice versa was created" ; logging.debug(msg)
-
-    def __str__(self):
-        return "This is a base class derived state vector object"
-
-    def MakeNewEnsemble(self, lag, covariancematrix=None):
-        """ 
-        :param lag: an integer indicating the time step in the lag order
-        :param covariancematrix: a matrix to draw random values from
-        :rtype: None
-    
-        Make a new ensemble, the attribute lag refers to the position in the state vector. 
-        Note that lag=1 means an index of 0 in python, hence the notation lag-1 in the indexing below.
-        The argument is thus referring to the lagged state vector as [1,2,3,4,5,..., nlag]
-
-        The optional covariance object to be passed holds a matrix of dimensions [nparams, nparams] which is
-        used to draw ensemble members from. If this argument is not passed it will be substituted with an 
-        identity matrix of the same dimensions.
-
-        """    
-
-        if covariancematrix is None: 
-            covariancematrix = np.identity(self.nparams)
-
-        # Make a Cholesky decomposition of the covariance matrix
-
-
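-        # The SVD is used here only as a diagnostic: the singular values s give an
-        # approximate number of degrees of freedom as (sum(s))**2 / sum(s**2).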
-        U, s, Vh = np.linalg.svd(covariancematrix)
-        dof = np.sum(s) ** 2 / sum(s ** 2)
-        C = np.linalg.cholesky(covariancematrix)
-
-        logging.debug('Cholesky decomposition has succeeded ')
-        logging.debug('Appr. degrees of freedom in covariance matrix is %s' % (int(dof)))
-
-
-        # Create mean values 
-
-        NewMean = np.ones(self.nparams, float) # standard value for a new time step is 1.0
-
-        # If this is not the start of the filter, average previous two optimized steps into the mix
-
-        if lag == self.nlag and self.nlag >= 3:
-            NewMean += self.EnsembleMembers[lag - 2][0].ParameterValues + \
-                                           self.EnsembleMembers[lag - 3][0].ParameterValues 
-            NewMean = NewMean / 3.0
-
-        # Create the first ensemble member with a deviation of 0.0 and add to list
-
-        NewMember = self.GetNewMember(0)
-        NewMember.ParameterValues = NewMean.flatten()  # no deviations
-        self.EnsembleMembers[lag - 1].append(NewMember)
-
-        # Create members 1:nmembers and add to EnsembleMembers list
-
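-        # Each member is drawn as NewMean + C*z with z ~ N(0, I), so that the ensemble
-        # deviations (approximately) carry the prescribed covariance matrix.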
-        for member in range(1, self.nmembers):
-            randstate = np.random.get_state()
-            rands = np.random.randn(self.nparams)
-
-            NewMember = self.GetNewMember(member)
-            NewMember.ParameterValues = np.dot(C, rands) + NewMean
-            self.EnsembleMembers[lag - 1].append(NewMember)
-
-        logging.debug('%d new ensemble members were added to the state vector # %d' % (self.nmembers, lag))
-
-    def GetNewMember(self, memberno):
-        """ 
-        :param memberno: an integer indicating the ensemble member number
-        :rtype: an empty ensemblemember object
-        
-        Return an ensemblemember object
-        """ 
-
-        return EnsembleMember(memberno)
-
-
-    def Propagate(self):
-        """ 
-        :rtype: None
-
-        Propagate the parameter values in the StateVector to the next cycle. This means a shift by one cycle step 
-        for all states that will be optimized once more, and the creation of a new ensemble for the time step that 
-        just comes in for the first time (step=nlag). 
-        In the future, this routine can incorporate a formal propagation of the statevector.
-
-        """
-        from datetime import timedelta
-        # Remove State Vector n=1 by simply "popping" it from the list and appending a new empty list at the front. This empty list will
-        # hold the new ensemble for the new cycle 
-
-        self.EnsembleMembers.pop(0)
-        self.EnsembleMembers.append([])
-
-        # And now create a new time step of mean + members for n=nlag
-
-    
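-        # Evaluate the covariance at the temporal midpoint of the newly added (last) lag
-        # window, i.e. (nlag - 0.5) cycle lengths after the current start date.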
-        date = self.DaCycle['time.start'] + timedelta(days=(self.nlag - 0.5) * int(self.DaCycle['time.cycle']))
-        cov = self.GetCovariance(date)
-        self.MakeNewEnsemble(self.nlag, cov)
-
-        logging.info('The state vector has been propagated by one cycle ')
-
-    def WriteToFile(self, filename):
-        """
-        :param filename: the full filename for the output NetCDF file
-        :rtype: None
-
-        Write the StateVector information to a NetCDF file for later use. 
-        In principle the output file will have only two datasets inside, 
-        called:
-            * `meanstate`, dimensions [nlag, nparameters]
-            * `ensemblestate`, dimensions [nlag, nmembers, nparameters]
-
-        This NetCDF information can be read back into a StateVector object using 
-        :meth:`~da.baseclasses.statevector.StateVector.ReadFromFile`
-
-        """
-        #import da.tools.io4 as io
-        #import da.tools.io as io
-
-
-        if not self.isOptimized:
-            ncfile = io.CT_CDF(filename, method='create')
-            logging.debug('Creating new StateVector output file (%s)' % filename)
-            qual = 'prior'
-        else:
-            ncfile = io.CT_CDF(filename, method='write')
-            logging.debug('Opening existing StateVector output file (%s)' % filename)
-            qual = 'opt'
-
-        dimparams = ncfile.AddParamsDim(self.nparams)
-        dimmembers = ncfile.AddMembersDim(self.nmembers)
-        dimlag = ncfile.AddLagDim(self.nlag, unlimited=True)
-
-        for n in range(self.nlag):
-            members = self.EnsembleMembers[n]
-            MeanState = members[0].ParameterValues
-
-            savedict = ncfile.StandardVar(varname='meanstate_%s' % qual)
-            savedict['dims'] = dimlag + dimparams 
-            savedict['values'] = MeanState
-            savedict['count'] = n
-            savedict['comment'] = 'this represents the mean of the ensemble'
-            ncfile.AddData(savedict)
-
-            members = self.EnsembleMembers[n]
-            devs = np.asarray([m.ParameterValues.flatten() for m in members])
-            data = devs - np.asarray(MeanState)
-
-            savedict = ncfile.StandardVar(varname='ensemblestate_%s' % qual)
-            savedict['dims'] = dimlag + dimmembers + dimparams 
-            savedict['values'] = data
-            savedict['count'] = n
-            savedict['comment'] = 'this represents deviations from the mean of the ensemble'
-            ncfile.AddData(savedict)
-
-        ncfile.close()
-        logging.info('Successfully wrote the State Vector to file (%s) ' % filename)
-
-    def ReadFromFile(self, filename, qual='opt'):
-        """ 
-        :param filename: the full filename for the input NetCDF file
-        :param qual: a string indicating whether to read the 'prior' or 'opt'(imized) StateVector from file
-        :rtype: None
-
-        Read the StateVector information from a NetCDF file and put it in a StateVector object.
-        In principle the input file will have only four datasets inside, 
-        called:
-            * `meanstate_prior`, dimensions [nlag, nparameters]
-            * `ensemblestate_prior`, dimensions [nlag, nmembers, nparameters]
-            * `meanstate_opt`, dimensions [nlag, nparameters]
-            * `ensemblestate_opt`, dimensions [nlag, nmembers, nparameters]
-
-        This NetCDF information can be written to file using 
-        :meth:`~da.baseclasses.statevector.StateVector.WriteToFile`
-
-        """
-
-        f = io.CT_Read(filename, 'read')
-        MeanState = f.GetVariable('statevectormean_' + qual)
-        EnsembleMembers = f.GetVariable('statevectorensemble_' + qual)
-        f.close()
-
-    
-        for n in range(self.nlag):
-            if not self.EnsembleMembers[n] == []:
-                self.EnsembleMembers[n] = []
-                logging.warning('Existing ensemble for lag=%d was removed to make place for newly read data' % (n + 1,))
-
-            for m in range(self.nmembers):
-                NewMember = self.GetNewMember(m)
-                NewMember.ParameterValues = EnsembleMembers[n, m, :].flatten() + MeanState[n]  # add the mean to the deviations to hold the full parameter values
-                self.EnsembleMembers[n].append(NewMember)
-
-        logging.info('Successfully read the State Vector from file (%s) ' % filename)
-
-    def WriteMembersToFile(self, lag):
-        """ 
-           :param lag: Which lag step of the filter to write, must lie in range [1,...,nlag]
-           :rtype: None
-
-           Write ensemble member information to a NetCDF file for later use. The standard output filename is 
-           *parameters.DDD.nc* where *DDD* is the number of the ensemble member. Standard output file location 
-           is the `dir.input` of the DaCycle object. In principle the output file will have only two datasets inside 
-           called `parametervalues` which is of dimensions `nparameters` and `parametermap` which is of dimensions (180,360). 
-           This dataset can be read and used by a :class:`~da.baseclasses.observationoperator.ObservationOperator` object. 
-
-           .. note:: if more, or other information is needed to complete the sampling of the ObservationOperator you
-                     can simply inherit from the StateVector baseclass and overwrite this WriteMembersToFile function.
-
-        """
-
-        # These import statements caused a crash in netCDF4 on MacOSX. No problems on Jet though. Solution was
-        # to do the import already at the start of the module, not just in this method.
-           
-        #import da.tools.io as io
-        #import da.tools.io4 as io
-
-        outdir = self.DaCycle['dir.input']
-        members = self.EnsembleMembers[lag - 1]
-
-        for mem in members:
-            filename = os.path.join(outdir, 'parameters.%03d.nc' % mem.membernumber)
-            ncf = io.CT_CDF(filename, method='create')
-            dimparams = ncf.AddParamsDim(self.nparams)
-            dimgrid = ncf.AddLatLonDim()
-
-            data = mem.ParameterValues
-
-            savedict = io.std_savedict.copy()
-            savedict['name'] = "parametervalues"
-            savedict['long_name'] = "parameter_values_for_member_%d" % mem.membernumber
-            savedict['units'] = "unitless"
-            savedict['dims'] = dimparams 
-            savedict['values'] = data
-            savedict['comment'] = 'These are parameter values to use for member %d' % mem.membernumber
-            ncf.AddData(savedict)
-
-            griddata = self.VectorToGrid(vectordata=data)
-            
-            savedict = io.std_savedict.copy()
-            savedict['name'] = "parametermap"
-            savedict['long_name'] = "parametermap_for_member_%d" % mem.membernumber
-            savedict['units'] = "unitless"
-            savedict['dims'] = dimgrid 
-            savedict['values'] = griddata.tolist()
-            savedict['comment'] = 'These are gridded parameter values to use for member %d' % mem.membernumber
-            ncf.AddData(savedict)
-            
-            ncf.close()
-
-            logging.debug('Successfully wrote data from ensemble member %d to file (%s) ' % (mem.membernumber, filename))
-
-    def GridToVector(self, griddata=None, method='avg'):
-        """ 
-            Map gridded data onto a vector of length (nparams,)
-
-           :param griddata: a gridded dataset to use. This dataset is mapped onto a vector of length `nparams`
-           :param method: a string that specifies how to combine the grid boxes belonging to each parameter. Must be one of ['avg','sum','minval']
-           :rtype: ndarray: size (nparameters,)
-
-           This method makes use of a dictionary that links every parameter number [1,...,nparams] to a series of gridindices. These 
-           indices specify a location on a 360x180 array, stretched into a vector using `array.flat`. There are multiple ways of calling 
-           this method::
-
-               values = self.GridToVector(griddata=mygriddeddata, method='minval') # use the minimum value of all datapoints covered by that parameter index
-
-               values = self.GridToVector(griddata=mygriddeddata, method='avg')    # use the average value of all datapoints covered by that parameter index
-
-               values = self.GridToVector(griddata=mygriddeddata, method='sum')    # use the sum of the values of all datapoints covered by that parameter index
-
-           .. note:: This method uses a DaSystem object that must be initialized with a proper parameter map. See :class:`~da.baseclasses.dasystem` for details
-
-        """
-
-        methods = ['avg', 'sum', 'minval']
-        if method not in methods:
-            logging.error("To put data from a map into the statevector, please specify the method to use (%s)" % methods)
-            raise ValueError
-
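-        # self.griddict maps each parameter number k to the indices it occupies on the
-        # flattened 1x1 degree grid; take() gathers those grid boxes so they can be
-        # combined into element k-1 of the result vector.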
-        result = np.zeros(self.nparams, float)
-        for k, v in self.griddict.iteritems():
-            #print k,k-1,result.shape, v
-            if method == "avg": 
-                result[k - 1] = griddata.take(v).mean()
-            elif method == "sum" : 
-                result[k - 1] = griddata.take(v).sum()
-            elif method == "minval" : 
-                result[k - 1] = griddata.take(v).min()
-        return result # Note that the result is returned, but not yet placed in the member.ParameterValues attribute!
-
-
-    def VectorToGrid(self, vectordata=None):
-        """ 
-            Map vector elements onto a gridded map
-
-           :param vectordata: a vector dataset to use. This dataset is mapped onto a 1x1 degree grid and must be of length `nparams`
-           :rtype: ndarray: an array of size (180,360,) 
-
-           This method makes use of a dictionary that links every parameter number [1,...,nparams] to a series of gridindices. These 
-           indices specify a location on a 360x180 array, stretched into a vector using `array.flat`. There are multiple ways of calling 
-           this method::
-
-               griddedarray = self.VectorToGrid(vectordata=ParameterValues) # simply puts the ParameterValues onto a (180,360,) array
-
-           .. note:: This method uses a DaSystem object that must be initialized with a proper parameter map. See :class:`~da.baseclasses.dasystem` for details
-
-        """
-
-        result = np.zeros(self.gridmap.shape, float)
-        for k, v in self.griddict.iteritems():
-            #print k,v
-            result.put(v, vectordata[k - 1])
-
-        return result         
-
-    def VectorToTC(self, vectordata, cov=False):
-        """ 
-            project Vector onto TransCom regions 
-
-           :param vectordata: a vector dataset to use, must be of length `nparams`
-           :param cov: a Boolean to specify whether the input dataset is a vector (mean), or a matrix (covariance)
-           :rtype: ndarray: an array of size (23,) (cov:F) or of size (23,23,) (cov:T)
-        """
-        M = self.tcmatrix
-        if cov:
-            return np.dot(np.transpose(M), np.dot(vectordata, M))
-        else:
-            return np.dot(vectordata.squeeze(), M)
-
-    def StateToGrid(self, fluxvector=None, lag=1):
-        """ 
-            Transforms the StateVector information (mean + covariance) to a 1x1 degree grid.
-
-            :param fluxvector: a vector of length (nparams,) that holds the fluxes associated with each parameter in the StateVector
-            :param lag: the lag at which to evaluate the StateVector
-            :rtype: a tuple of two arrays (gridmean,gridvariance) with dimensions (180,360,)
-
-            If the argument `fluxvector` is not passed, the function will return the mean parameter value and its variance on a 1x1 map.
-            
-            .. note:: Although we can return the variance information for each gridbox, the covariance information contained in the original ensemble is lost when mapping to 1x1 degrees!
-
-        """
-
-        if fluxvector is None:
-            fluxvector = np.ones(self.nparams)
-
-        ensemble = self.EnsembleMembers[lag - 1]
-        ensemblemean = ensemble[0].ParameterValues
-
-        # First transform the mean
-
-        gridmean = self.VectorToGrid(vectordata=ensemblemean * fluxvector)
-
-        # And now the variance: compute the ensemble variance of the (flux-weighted) deviations for each parameter, then map it to the grid
-
-        deviations = np.array([mem.ParameterValues * fluxvector - ensemblemean for mem in ensemble])
-        variance = deviations.std(axis=0) ** 2
-        gridvar = self.VectorToGrid(variance)
-
-        return (gridmean, gridvar)
-
-    def StateToTC(self, fluxvector=None, lag=1):
-        """ 
-            Transforms the StateVector information (mean + covariance) to the TransCom regions.
-
-            :param fluxvector: a vector of length (nparams,) that holds the fluxes associated with each parameter in the StateVector
-            :param lag: the lag at which to evaluate the StateVector
-            :rtype: a tuple of two arrays (mean,covariance) with dimensions ((23,), (23,23,) )
-
-        """
-
-        ensemble = self.EnsembleMembers[lag - 1]
-        ensemblemean = ensemble[0].ParameterValues
-
-        # First transform the mean
-
-        mean = self.VectorToTC(vectordata=ensemble[0].ParameterValues * fluxvector)
-
-        # And now the covariance, first create covariance matrix (!), and then multiply
-
-        deviations = np.array([mem.ParameterValues * fluxvector - ensemblemean for mem in ensemble])
-        covariance = np.dot(np.transpose(deviations), deviations) / (self.nmembers - 1)
-        cov = self.VectorToTC(covariance, cov=True)
-
-        return (mean, cov)
-
-
-################### End Class StateVector ###################
-
-if __name__ == "__main__":
-
-    sys.path.append('../../')
-
-    import os
-    import sys
-    from da.tools.general import StartLogger 
-    from da.tools.initexit import CycleControl 
-    import numpy as np
-    import da.tools.rc as rc
-
-    opts = ['-v']
-    args = {'rc':'../../da.rc', 'logfile':'da_initexit.log', 'jobrcfilename':'test.rc'}
-
-    StartLogger()
-    DaCycle = CycleControl(opts, args)
-
-    print EnsembleMember(0)
-    
-
-    DaCycle.Initialize()
-    print DaCycle
-
-    StateVector = StateVector(DaCycle)
-    StateVector.Initialize()
-    StateVector.MakeNewEnsemble(lag=1)
-
-    StateVector.WriteMembersToFile(lag=1)
-
-    StateVector.Propagate()
-
-    savedir = DaCycle['dir.output']
-    filename = os.path.join(savedir, 'savestate.nc')
-
-    StateVector.WriteToFile(filename)
-
-
-    savedir = DaCycle['dir.output']
-    filename = os.path.join(savedir, 'savestate.nc')
-
-    StateVector.ReadFromFile(filename)
-
diff --git a/da/bin/readme_wrapper.txt b/da/bin/readme_wrapper.txt
deleted file mode 100644
index c48db083de391654c4fba750c86883a4afcb13e5..0000000000000000000000000000000000000000
--- a/da/bin/readme_wrapper.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-# Please copy the following line, quit this reader, paste it on the command line in this directory, and press return. 
-# A file called tm5_mpi_wrapper will be created
-
-mpicc tm5_mpi_wrapper.c -o tm5_mpi_wrapper
diff --git a/da/bin/tm5_mpi_wrapper.c b/da/bin/tm5_mpi_wrapper.c
deleted file mode 100644
index ae4f5455041863a40e9c8a5c4d5af67ec82f6f73..0000000000000000000000000000000000000000
--- a/da/bin/tm5_mpi_wrapper.c
+++ /dev/null
@@ -1,61 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/wait.h>
-#include <errno.h>
-#include "mpi.h"
-
-#define CMDLENGTH 200
-
-int safe_system (const char *command);
-
-int main(int argc, char *argv[]) { 
-
-  int ierr;
-  int myrank;
-  char cmd[CMDLENGTH];
-
-  ierr = MPI_Init(&argc, &argv);
-  if (argc != 2) {
-    fprintf(stderr, "[tm5_mpi_wrapper] Expecting 1 argument, got %d.\n", argc - 1);
-    exit(-1);
-  }
-  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
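-  /* Each rank runs "<argv[1]> <rank>" (rank zero-padded to 3 digits); any non-zero
-     return status aborts the whole MPI job so no rank is left waiting. */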
-  snprintf(cmd,CMDLENGTH,"%s %03d",argv[1],myrank);
-  //snprintf(cmd,CMDLENGTH,"%s %03d >> tm5.%03d.log",argv[1],myrank,myrank);//
-  printf( "MPI rank %d about to execute command \"%s\".\n",myrank,cmd );
-  ierr = safe_system(cmd);
-  if(ierr != 0) {
-     MPI_Abort(MPI_COMM_WORLD,ierr);
-     exit(ierr);
-  }
-  ierr = MPI_Finalize(  );
-  exit(ierr);
-}
-
-
-
-
-
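-/* Run `command` through "/bin/sh -c" in a forked child and wait for it, retrying
-   waitpid() when it is interrupted by a signal. Returns the child's wait status, -1 on
-   fork/waitpid failure, or 1 when command is NULL; the child _exit()s with 127 if the
-   shell cannot be executed. */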
-int safe_system (const char *command) {
-  int pid, status;
-
-  if (command == 0)
-    return 1;
-  pid = fork();
-  if (pid == -1) // fork failed
-    return -1;
-  if (pid == 0) {  // then this is the child
-    char *argv[4];
-    argv[0] = "sh";
-    argv[1] = "-c";
-    argv[2] = (char*)command;
-    argv[3] = 0;
-    execv("/bin/sh", argv);
-    _exit(127);
-  }
-  do {
-    if (waitpid(pid, &status, 0) == -1) {
-      if (errno != EINTR)
-        return -1;
-    } else
-      return status;
-  } while(1);
-}
diff --git a/da/ct/__init__.py b/da/ct/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/da/ct/dasystem.py b/da/ct/dasystem.py
deleted file mode 100755
index 12e66a95e965ab0297bf74d9ab99db20f6ab87d6..0000000000000000000000000000000000000000
--- a/da/ct/dasystem.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python
-# control.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 26 Aug 2010.
-
-"""
-
-import os
-import sys
-import logging
-import datetime
-
-################### Begin Class CtDaSystem ###################
-
-from da.baseclasses.dasystem import DaSystem
-
-class CtDaSystem(DaSystem):
-    """ Information on the data assimilation system used. This is normally an rc-file with settings.
-    """
-
-    def Validate(self):
-        """ 
-        Validate the contents of the rc-file given a dictionary of required keys
-        """
-
-        needed_rc_items = ['obs.input.dir',
-                           'obs.input.fname',
-                           'ocn.covariance',
-                           'nparameters',
-                           'bio.covariance',
-                           'deltaco2.prefix',
-                           'regtype']
-
-
-        for k, v in self.iteritems():
-            if v == 'True' : self[k] = True
-            if v == 'False': self[k] = False
-
-        for key in needed_rc_items:
-            if not self.has_key(key):
-                msg = 'Missing a required value in rc-file : %s' % key
-                logging.error(msg)
-                raise IOError, msg
-
-        logging.debug('DA System Info settings have been validated successfully')
-
-################### End Class CtDaSystem ###################
-
-
-if __name__ == "__main__":
-    pass
diff --git a/da/ct/obs.py b/da/ct/obs.py
deleted file mode 100755
index 702984c0a0077126fec84fcdfc347f8964b3cdf3..0000000000000000000000000000000000000000
--- a/da/ct/obs.py
+++ /dev/null
@@ -1,473 +0,0 @@
-#!/usr/bin/env python
-# obs.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 28 Jul 2010.
-
-"""
-import os
-import sys
-import logging
-import datetime
-
-sys.path.append(os.getcwd())
-
-identifier = 'CarbonTracker CO2 mixing ratios'
-version = '0.0'
-
-from da.baseclasses.obs import Observation
-
-################### Begin Class CtObservations ###################
-
-class CtObservations(Observation):
-    """ an object that holds data + methods and attributes needed to manipulate mixing ratio values """
-
-    def getid(self):
-        return identifier
-
-    def getversion(self):
-        return version
-
-    def getlength(self):
-        return len(self.Data)
-
-    def Initialize(self):
-
-        self.startdate = self.DaCycle['time.sample.start']
-        self.enddate = self.DaCycle['time.sample.end']
-        DaSystem = self.DaCycle.DaSystem
-
-        sfname = DaSystem['obs.input.fname']
-
-        if sfname.endswith('.nc'):
-            filename = os.path.join(DaSystem['obs.input.dir'], sfname)
-        else:
-            filename = os.path.join(DaSystem['obs.input.dir'], sfname + '.' + self.startdate.strftime('%Y%m%d') + '.nc')
-
-        if not os.path.exists(filename):
-            msg = 'Could not find  the required observation input file (%s) ' % filename 
-            logging.error(msg)
-            raise IOError, msg
-        else:
-            self.ObsFilename = filename
-
-        self.Data = MixingRatioList([])
-
-
-    def AddObs(self):
-        """ Returns a MixingRatioList holding individual MixingRatioSample objects for all obs in a file
-      
-            The CarbonTracker mixing ratio files are provided as one long list of obs for all possible dates. So we can 
-            either:
-            
-            (1) read all obs, and then subselect the data we will use in the rest of this cycle
-            (2) use nco to make a subset of the data
-            
-            For now, we will stick with option (1) 
-        
-        """
-        import da.tools.io4 as io
-        import datetime as dtm
-        from string import strip, join
-        from numpy import array, logical_and
-
-        ncf = io.CT_Read(self.ObsFilename, 'read')
-        idates = ncf.GetVariable('date_components')
-        dates = array([dtm.datetime(*d) for d in idates])
-
-        subselect = logical_and(dates >= self.startdate , dates <= self.enddate).nonzero()[0]
-
-        dates = dates.take(subselect, axis=0)
-        
-        ids = ncf.GetVariable('id').take(subselect, axis=0)
-        evn = ncf.GetVariable('eventnumber').take(subselect, axis=0)
-        evn = [s.tostring().lower() for s in evn]
-        evn = map(strip, evn)
-        sites = ncf.GetVariable('site').take(subselect, axis=0)
-        sites = [s.tostring().lower() for s in sites]
-        sites = map(strip, sites)
-        lats = ncf.GetVariable('lat').take(subselect, axis=0)
-        lons = ncf.GetVariable('lon').take(subselect, axis=0)
-        alts = ncf.GetVariable('alt').take(subselect, axis=0)
-        obs = ncf.GetVariable('obs').take(subselect, axis=0)
-        species = ncf.GetVariable('species').take(subselect, axis=0)
-        species = [s.tostring().lower() for s in species]
-        species = map(strip, species)
-        date = ncf.GetVariable('date').take(subselect, axis=0)
-        strategy = ncf.GetVariable('sampling_strategy').take(subselect, axis=0)
-        flags = ncf.GetVariable('NOAA_QC_flags').take(subselect, axis=0)
-        flags = [s.tostring().lower() for s in flags]
-        flags = map(strip, flags)
-        flags = [int(f == '...') for f in flags]
-        ncf.close()
-
-        logging.debug("Successfully read data from obs file (%s)" % self.ObsFilename)
-
-        for n in range(len(dates)): 
-           self.Data.append(MixingRatioSample(ids[n], dates[n], sites[n], obs[n], 0.0, 0.0, 0.0, 0.0, flags[n], alts[n], lats[n], lons[n], evn[n], species[n], strategy[n], 0.0))
-
-        logging.debug("Added %d observations to the Data list" % len(dates))
-
-    def AddSimulations(self, filename, silent=True):
-        """ Adds model simulated values to the mixing ratio objects """
-
-        import da.tools.io4 as io
-
-        if not os.path.exists(filename):
-            msg = "Sample output filename for observations could not be found : %s" % filename
-            logging.error(msg)
-            logging.error("Did the sampling step succeed?")
-            logging.error("...exiting")
-            raise IOError, msg
-
-        ncf = io.CT_Read(filename, method='read')
-        ids = ncf.GetVariable('id')
-        simulated = ncf.GetVariable('flask')
-        ncf.close()
-        logging.info("Successfully read data from model sample file (%s)" % filename)
-
-        obs_ids = self.Data.getvalues('id')
-
-        obs_ids = obs_ids.tolist()
-        ids = map(int, ids)
-
-        missing_samples = []
-
-        for id, val in zip(ids, simulated): 
-            if id in obs_ids:
-                index = obs_ids.index(id)
-                #print id,val,val.shape
-                self.Data[index].simulated = val * 1e6  # to umol/mol
-            else:     
-                missing_samples.append(id)
-
-        if not silent and missing_samples != []:
-            logging.warning('Model samples were found that did not match any ID in the observation list. Skipping them...')
-            #msg = '%s'%missing_samples ; logging.warning(msg)
-
-        logging.debug("Added %d simulated values to the Data list" % (len(ids) - len(missing_samples)))
-
-    def WriteSampleInfo(self):
-        """ 
-            Write the information needed by the observation operator to a file. Return the filename that was written for later use
-
-        """
-        import shutil
-        #import da.tools.io as io
-        import da.tools.io4 as io
-        from da.tools.general import ToDectime
-
-        obsinputfile = os.path.join(self.DaCycle['dir.input'], 'observations_%s.nc' % self.DaCycle['time.sample.stamp'])
-
-        f = io.CT_CDF(obsinputfile, method='create')
-        logging.debug('Creating new observations file for ObservationOperator (%s)' % obsinputfile)
-
-        dimid = f.AddDim('id', len(self.Data))
-        dim30char = f.AddDim('string_of30chars', 30)
-        dim24char = f.AddDim('string_of24chars', 24)
-        dim10char = f.AddDim('string_of10chars', 10)
-        dimcalcomp = f.AddDim('calendar_components', 6)
-
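-        # Every variable below follows the same pattern: copy io.std_savedict, fill in
-        # name/units/dims/values (plus optional dtype, missing_value, _FillValue and
-        # comment), and hand the dictionary to f.AddData().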
-        data = self.Data.getvalues('id')
-
-        savedict = io.std_savedict.copy() 
-        savedict['name'] = "id"
-        savedict['dtype'] = "int"
-        savedict['long_name'] = "identification number"
-        savedict['units'] = "NA"
-        savedict['dims'] = dimid
-        savedict['values'] = data.tolist()
-        savedict['comment'] = "This is a unique identifier for each preprocessed observation used in CarbonTracker" 
-        f.AddData(savedict)
-
-        data = [ToDectime(d) for d in self.Data.getvalues('xdate') ]
-
-        savedict = io.std_savedict.copy() 
-        savedict['name'] = "decimal_date"
-        savedict['units'] = "years"
-        savedict['dims'] = dimid
-        savedict['values'] = data
-        savedict['missing_value'] = -1.e34
-        savedict['_FillValue'] = -1.e34
-        f.AddData(savedict)
-
-        data = [[d.year, d.month, d.day, d.hour, d.minute, d.second] for d in self.Data.getvalues('xdate') ]
-
-        savedict = io.std_savedict.copy() 
-        savedict['dtype'] = "int"
-        savedict['name'] = "date_components"
-        savedict['units'] = "integer components of UTC date"
-        savedict['dims'] = dimid + dimcalcomp
-        savedict['values'] = data
-        savedict['missing_value'] = -9
-        savedict['_FillValue'] = -9
-        savedict['comment'] = "Calendar date components as integers. Times and dates are UTC." 
-        savedict['order'] = "year, month, day, hour, minute, second"
-        f.AddData(savedict)
-
-        data = self.Data.getvalues('lat')
-
-        savedict = io.std_savedict.copy() 
-        savedict['name'] = "lat"
-        savedict['units'] = "degrees_north"
-        savedict['dims'] = dimid
-        savedict['values'] = data.tolist()
-        savedict['missing_value'] = -999.9
-        savedict['_FillValue'] = -999.9
-        f.AddData(savedict)
-
-        data = self.Data.getvalues('lon')
-
-        savedict = io.std_savedict.copy() 
-        savedict['name'] = "lon"
-        savedict['units'] = "degrees_east"
-        savedict['dims'] = dimid
-        savedict['values'] = data.tolist()
-        savedict['missing_value'] = -999.9
-        savedict['_FillValue'] = -999.9
-        f.AddData(savedict)
-
-        data = self.Data.getvalues('height')
-
-        savedict = io.std_savedict.copy() 
-        savedict['name'] = "alt"
-        savedict['units'] = "meters_above_sea_level"
-        savedict['dims'] = dimid
-        savedict['values'] = data.tolist()
-        savedict['missing_value'] = -999.9
-        savedict['_FillValue'] = -999.9
-        f.AddData(savedict)
-
-        data = self.Data.getvalues('samplingstrategy')
-
-        savedict = io.std_savedict.copy() 
-        savedict['dtype'] = "int"
-        savedict['name'] = "sampling_strategy"
-        savedict['units'] = "NA"
-        savedict['dims'] = dimid
-        savedict['values'] = data.tolist()
-        savedict['missing_value'] = -9
-        savedict['_FillValue'] = -9
-        f.AddData(savedict)
-
-        try:
-
-            data = self.Data.getvalues('evn')
-
-            savedict = io.std_savedict.copy() 
-            savedict['dtype'] = "char"
-            savedict['name'] = "eventnumber"
-            savedict['units'] = "NOAA database identifier"
-            savedict['dims'] = dimid + dim30char
-            savedict['values'] = data
-            savedict['missing_value'] = '-'
-            savedict['_FillValue'] = '-'
-            f.AddData(savedict)
-
-
-            data = self.Data.getvalues('species')
-
-            savedict = io.std_savedict.copy() 
-            savedict['dtype'] = "char"
-            savedict['name'] = "species"
-            savedict['units'] = "chemical_species_name"
-            savedict['dims'] = dimid + dim10char
-            savedict['values'] = data
-            savedict['missing_value'] = '-'
-            savedict['_FillValue'] = '-'
-            f.AddData(savedict)
-
-            data = self.Data.getvalues('code')
-
-            savedict = io.std_savedict.copy() 
-            savedict['dtype'] = "char"
-            savedict['name'] = "site"
-            savedict['units'] = "site_identifier"
-            savedict['dims'] = dimid + dim24char
-            savedict['values'] = data
-            savedict['missing_value'] = '-'
-            savedict['_FillValue'] = '-'
-            f.AddData(savedict)
-
-        except:
-            logging.warning("Character arrays 'species' and 'sites' were not written by the io module")
-
-        f.close()
-
-        logging.debug("Successfully wrote data to obs file")
-        logging.info("Sample input file for obs operator now in place [%s]" % obsinputfile)
-
-        return obsinputfile
-
-
-    def AddModelDataMismatch(self):
-        """ 
-            Get the model-data mismatch values for this cycle.
-
-                (1) Open a sites_weights file
-                (2) Parse the data
-                (3) Compare site list against data
-                (4) Take care of double sites, etc
-
-        """
-        import da.tools.rc as rc
-
-        filename = self.DaCycle.DaSystem['obs.sites.rc']
-
-        if not os.path.exists(filename):
-            msg = 'Could not find  the required sites.rc input file (%s) ' % filename
-            logging.error(msg)
-            raise IOError, msg
-        else:
-            self.SitesFile = filename
-
-        SitesWeights = rc.read(self.SitesFile)
-
-        self.rejection_threshold = int(SitesWeights['obs.rejection.threshold'])
-        self.global_R_scaling = float(SitesWeights['global.R.scaling'])
-        self.n_site_categories = int(SitesWeights['n.site.categories'])
-        self.n_sites_active = int(SitesWeights['n.sites.active'])
-        self.n_sites_moved = int(SitesWeights['n.sites.moved'])
-
-        logging.debug('Model-data mismatch rejection threshold: %d ' % self.rejection_threshold)
-        logging.debug('Model-data mismatch scaling factor     : %f ' % self.global_R_scaling)
-        logging.debug('Model-data mismatch site categories    : %d ' % self.n_site_categories)
-        logging.debug('Model-data mismatch active sites       : %d ' % self.n_sites_active)
-        logging.debug('Model-data mismatch moved sites        : %d ' % self.n_sites_moved)
-   
-        cats = [k for k in SitesWeights.keys() if 'site.category' in k] 
-
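-        # Each 'site.category' entry is expected to hold four ';'-separated fields
-        # (name; error; may_localize; may_reject), e.g. (illustrative values only):
-        #     site.category.01 : surface ; 1.50 ; TRUE ; TRUE
-        # and each 'site.active' entry (parsed below) links a site code to one of these
-        # categories, e.g. 'mlo_01d0 ; surface'.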
-        SiteCategories = {}
-        for key in cats:
-            name, error, may_localize, may_reject = SitesWeights[key].split(';')
-            name = name.strip().lower()
-            error = float(error)
-            may_localize = may_localize.strip().lower() == 'true'   # bool() of a non-empty string is always True, so parse the text explicitly
-            may_reject = may_reject.strip().lower() == 'true'
-            SiteCategories[name] = {'error':error, 'may_localize':may_localize, 'may_reject':may_reject}
-            #print name,SiteCategories[name]
-
-
-        active = [k for k in SitesWeights.keys() if 'site.active' in k] 
-
-        SiteInfo = {}
-        for key in active:
-            sitename, sitecategory = SitesWeights[key].split(';')
-            sitename = sitename.strip().lower()
-            sitecategory = sitecategory.strip().lower()
-            SiteInfo[sitename] = SiteCategories[sitecategory]
-            #print sitename,SiteInfo[sitename]
-
-        for obs in self.Data:
-
-            obs.mdm = 1000.0  # default is very high model-data-mismatch, until explicitly set by script
-
-            if SiteInfo.has_key(obs.code): 
-                logging.debug("Observation found (%s)" % obs.code)
-                obs.mdm = SiteInfo[obs.code]['error']
-                obs.may_localize = SiteInfo[obs.code]['may_localize']
-                obs.may_reject = SiteInfo[obs.code]['may_reject']
-            else:
-                logging.warning("Observation NOT found (%s), please check sites.rc file  (%s)  !!!" % (obs.code, self.SitesFile,))
-                obs.flag = 99
-
-            # Add SiteInfo dictionary to the Observation object for future use
-
-            self.SiteInfo = SiteInfo
-
-
-################### End Class CtObservations ###################
-
-
-
-################### Begin Class MixingRatioSample ###################
-
-class MixingRatioSample(object):
-    """ 
-        Holds the data that defines a Mixing Ratio Sample in the data assimilation framework. So far, this includes all
-        attributes listed below in the __init__ method. One can additionally make more types of data, or make new
-        objects for specific projects.
-
-    """
-
-    def __init__(self, id, xdate, code='XXX', obs=0.0, simulated=0.0, resid=0.0, hphr=0.0, mdm=0.0, flag=0, height=0.0, lat= -999., lon= -999., evn='0000', species='co2', samplingstrategy=1, sdev=0.0):
-
-            self.code = code.strip()      # Site code
-            self.xdate = xdate             # Date of obs
-            self.obs = obs               # Value observed
-            self.simulated = simulated         # Value simulated by model
-            self.resid = resid             # Mixing ratio residuals
-            self.hphr = hphr              # Mixing ratio prior uncertainty from fluxes (HPH) plus model-data mismatch (R)
-            self.mdm = mdm               # Model data mismatch
-            self.may_localize = True           # Whether sample may be localized in optimizer
-            self.may_reject = True              # Whether sample may be rejected if outside threshold
-            self.flag = flag              # Flag
-            self.height = height            # Sample height
-            self.lat = lat               # Sample lat
-            self.lon = lon               # Sample lon
-            self.id = id               # ID number
-            self.evn = evn               # Event number
-            self.sdev = sdev              # standard deviation of ensemble
-            self.masl = True              # Sample is in Meters Above Sea Level
-            self.mag = not self.masl     # Sample is in Meters Above Ground
-            self.species = species.strip()
-            self.samplingstrategy = samplingstrategy
-
-    def __str__(self):
-            day = self.xdate.strftime('%Y-%m-%d %H:%M:%S')
-            return ' '.join(map(str, [self.code, day, self.obs, self.flag, self.id]))
-
-################### End Class MixingRatioSample ###################
-
-################### Begin Class MixingRatioList ###################
-
-class MixingRatioList(list):
-    """ This is a special type of list that holds MixingRatioSample objects. It has methods to extract data from such a list easily """
-    from numpy import array, ndarray
-
-    def getvalues(self, name, constructor=array):
-            from numpy import ndarray
-            result = constructor([getattr(o, name) for o in self])
-            if isinstance(result, ndarray): 
-                return result.squeeze()
-            else:
-                return result
-    def unflagged(self):
-            return MixingRatioList([o for o in self if o.flag == 0])
-    def flagged(self):
-            return MixingRatioList([o for o in self if o.flag != 0])
-    def selectsite(self, site='mlo'):
-            l = [o for o in self if o.code == site]
-            return MixingRatioList(l)
-    def selecthours(self, hours=range(1, 24)):
-            l = [o for o in self if o.xdate.hour in hours]
-            return MixingRatioList(l)
-
-################### End Class MixingRatioList ###################
-
-if __name__ == "__main__":
-
-    from da.tools.initexit import StartLogger
-    from da.tools.pipeline import JobStart
-    from datetime import datetime
-    import logging
-    import sys, os
-
-    sys.path.append(os.getcwd())
-
-    StartLogger()
-
-    obs = CtObservations()
-
-    DaCycle = JobStart(['-v'], {'rc':'da.rc'})
-    DaCycle['time.sample.start'] = datetime(2000, 1, 1)
-    DaCycle['time.sample.end'] = datetime(2000, 1, 2)
-
-    obs.DaCycle = DaCycle
-    obs.Initialize()
-    obs.Validate()
-    obs.AddObs()
-    print(obs.Data.getvalues('obs'))
-
diff --git a/da/ct/optimizer.py b/da/ct/optimizer.py
deleted file mode 100755
index c5d7783a172c34bf75966e45e18534a05df94c59..0000000000000000000000000000000000000000
--- a/da/ct/optimizer.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-# optimizer.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 28 Jul 2010.
-
-"""
-
-import os
-import sys
-import logging
-import datetime
-import numpy as np
-sys.path.append(os.getcwd())
-
-from da.baseclasses.optimizer import Optimizer
-
-
-identifier = 'Ensemble Square Root Filter'
-version = '0.0'
-
-################### Begin Class CtOptimizer ###################
-
-class CtOptimizer(Optimizer):
-    """
-        This creates an instance of a CarbonTracker optimization object. The base class it derives from is the optimizer object.
-        Additionally, this CtOptimizer implements a special localization option following the CT2007 method.
-
-        All other methods are inherited from the base class Optimizer.
-    """
-    def getid(self):
-        return identifier
-
-    def getversion(self):
-        return version
-
-
-    def SetLocalization(self, type='None'):
-        """ determine which localization to use """
-
-        if type == 'CT2007':
-            self.localization = True
-            self.localizetype = 'CT2007'
-        else:
-            self.localization = False
-            self.localizetype = 'None'
-    
-        logging.info("Current localization option is set to %s" % self.localizetype)
-
-    def Localize(self, n):
-        """ localize the Kalman Gain matrix """
-        if not self.localization: return 
-        if self.localizetype == 'CT2007':
-
-            tvalue = 1.97591
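-            # 'prob' below is the t-statistic of the ensemble correlation between this
-            # observation and each state element; 1.97591 is (approximately) the two-sided
-            # 95% critical value of Student's t, so gain elements whose correlation is not
-            # statistically significant are zeroed. Localization is only applied to
-            # observations with sqrt(R) >= 1.5, i.e. relatively uncertain ones.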
-            if np.sqrt(self.R[n, n]) >= 1.5:
-                for r in range(self.nlag * self.nparams):
-                    corr = np.corrcoef(self.HX_prime[n, :], self.X_prime[r, :].squeeze())[0, 1]
-                    prob = corr / np.sqrt((1.0 - corr ** 2) / (self.nmembers - 2))
-                    if abs(prob) < tvalue:
-                        self.KG[r, n] = 0.0
-
-
-################### End Class CtOptimizer ###################
-
-
-if __name__ == "__main__":
-
-    from da.tools.initexit import StartLogger 
-    from da.tools.pipeline import JobStart 
-
-    sys.path.append(os.getcwd())
-
-    opts = ['-v']
-    args = {'rc':'da.rc', 'logfile':'da_initexit.log', 'jobrcfilename':'test.rc'}
-
-    StartLogger()
-
-    DaCycle = JobStart(opts, args)
-    DaCycle.Initialize()
-
-    opt = CtOptimizer()
-
-    nobs = 100
-    dims = (int(DaCycle['time.nlag']),
-                  int(DaCycle['da.optimizer.nmembers']),
-                  int(DaCycle.DaSystem['nparameters']),
-                  nobs,)
-
-    opt.Initialize(dims)
-    opt.SetLocalization(type='CT2007')
diff --git a/da/ct/standardvariables.py b/da/ct/standardvariables.py
deleted file mode 100755
index 9eac8f64499b0e5e93c6f65c5de4e2f30681d877..0000000000000000000000000000000000000000
--- a/da/ct/standardvariables.py
+++ /dev/null
@@ -1,188 +0,0 @@
-standard_variables = { 'bio_flux_prior' : {'name'        : 'bio_flux_prior',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, terrestrial vegetation, not optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'bio_flux_opt' : {'name'          : 'bio_flux_opt',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, terrestrial biosphere , optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ocn_flux_prior' : {'name'        : 'ocn_flux_prior',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, open ocean , not optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ocn_flux_opt' : {'name'          : 'ocn_flux_opt',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, open ocean , optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'fossil_flux_imp' : {'name'       : 'fossil_flux_imp',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, fossil fuel burning , imposed ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'fire_flux_imp' : {'name'         : 'fire_flux_imp',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, biomass burning , imposed ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'bio_flux_prior_cov' : {'name'    : 'bio_flux_prior_cov',\
-                                         'units'         : 'mol2 region-2 s-2' ,\
-                                         'long_name'     : 'Covariance of surface flux of carbon dioxide, terrestrial vegetation , not optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : '', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'bio_flux_opt_cov' : {'name'      : 'bio_flux_opt_cov',\
-                                         'units'         : 'mol2 region-2 s-2' ,\
-                                         'long_name'     : 'Covariance of surface flux of carbon dioxide, terrestrial vegetation , optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : '', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ocn_flux_prior_cov' : {'name'    : 'ocn_flux_prior_cov',\
-                                         'units'         : 'mol2 region-2 s-2' ,\
-                                         'long_name'     : 'Covariance of surface flux of carbon dioxide, open ocean , not optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : '', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ocn_flux_opt_cov' : {'name'      : 'ocn_flux_opt_cov',\
-                                         'units'         : 'mol2 region-2 s-2' ,\
-                                         'long_name'     : 'Covariance of surface flux of carbon dioxide, open ocean , optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : '', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'decimal_date' :  {'name'         : 'decimal_date',\
-                                         'units'         : 'years' ,\
-                                         'long_name'     : 'dates and times', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'date', \
-                                         'dims'          : (), \
-                                         'dtype'         : 'double', \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'date' :         {'name'          : 'date',\
-                                         'units'         : 'days since 2000-01-01 00:00:00 UTC' ,\
-                                         'long_name'     : 'UTC dates and times', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'date', \
-                                         'dims'          : (), \
-                                         'dtype'         : 'double', \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'idate' :        {'name'          : 'idate',\
-                                         'units'         : 'yyyy MM dd hh mm ss ' ,\
-                                         'long_name'     : 'integer components of date and time', \
-                                         'standard_name' : 'calendar_components', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'dims'          : (), \
-                                         'dtype'         : 'int', \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'latitude' :     {'name'          : 'latitude',\
-                                         'units'         : 'degrees_north ' ,\
-                                         'long_name'     : 'latitude', \
-                                         'standard_name' : 'latitude', \
-                                         'comment'       : 'center of interval',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'longitude' :     {'name'         : 'longitude',\
-                                         'units'         : 'degrees_east ' ,\
-                                         'long_name'     : 'longitude', \
-                                         'standard_name' : 'longitude', \
-                                         'comment'       : 'center of interval',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'height' :        {'name'         : 'height',\
-                                         'units'         : 'masl ' ,\
-                                         'long_name'     : 'height_above_ground_level', \
-                                         'standard_name' : 'height_above_ground_level', \
-                                         'comment'       : 'value is meters above sea level',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'co2' :           {'name'         : 'co2',\
-                                         'units'         : 'micromol mol-1 ' ,\
-                                         'long_name'     : 'mole_fraction_of_carbon_dioxide_in_air', \
-                                         'standard_name' : 'mole_fraction_of_carbon_dioxide_in_air', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'meanstate' :     {'name'         : 'statevectormean',\
-                                         'units'         : 'unitless' ,\
-                                         'long_name'     : 'mean_value_of_state_vector', \
-                                         'standard_name' : 'mean_value_of_state_vector', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ensemblestate':  {'name'         : 'statevectorensemble',\
-                                         'units'         : 'unitless' ,\
-                                         'long_name'     : 'ensemble_value_of_state_vector', \
-                                         'standard_name' : 'ensemble_value_of_state_vector', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'unknown' :      {'name'          : '',\
-                                         'units'         : '' ,\
-                                         'long_name'     : '', \
-                                         'standard_name' : '', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                     }
-
-
-
-
diff --git a/da/ct/statevector.py b/da/ct/statevector.py
deleted file mode 100755
index a1e6aa5e1f247641978d00bcf57b2aa1812a4191..0000000000000000000000000000000000000000
--- a/da/ct/statevector.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-# ct_statevector_tools.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 28 Jul 2010.
-
-"""
-
-import os
-import sys
-import logging
-import datetime
-import numpy as np
-import da.tools.io4 as io
-
-sys.path.append(os.getcwd())
-from da.baseclasses.statevector import EnsembleMember, StateVector
-
-
-identifier = 'CarbonTracker Statevector '
-version = '0.0'
-
-################### Begin Class CtStateVector ###################
-
-class CtStateVector(StateVector):
-    """ This is a StateVector object for CarbonTracker. It has a private method to make new ensemble members """
-
-    def getid(self):
-        return identifier
-
-    def getversion(self):
-        return version
-
-    def GetCovariance(self, date):
-        """ Read the prior covariance matrices for the biosphere and the ocean from the files specified in the rc-file,
-            and assemble them into one full covariance matrix valid for the given date. The ocean covariance file name
-            depends on the date, the biosphere covariance file is static.
-        """
-
-        
-        try:
-            import matplotlib.pyplot as plt
-        except ImportError:
-            pass
-
-        # Get the needed matrices from the specified covariance files
-
-        file_ocn_cov = self.DaCycle.DaSystem['ocn.covariance'] 
-        file_bio_cov = self.DaCycle.DaSystem['bio.covariance'] 
-
-        # replace YYYY.MM in the ocean covariance file string
-
-        file_ocn_cov = file_ocn_cov.replace('2000.01', date.strftime('%Y.%m'))
-
-        for file in [file_ocn_cov, file_bio_cov]:
-            if not os.path.exists(file):
-                msg = "Cannot find the specified file %s" % file
-                logging.error(msg)
-                raise IOError, msg
-            else:
-                logging.info("Using covariance file: %s" % file)
-
-        f_ocn = io.CT_Read(file_ocn_cov, 'read')
-        f_bio = io.CT_Read(file_bio_cov, 'read')
-
-        cov_ocn = f_ocn.GetVariable('CORMAT')
-        cov_bio = f_bio.GetVariable('qprior')
-
-        f_ocn.close()
-        f_bio.close()
-
-        logging.debug("Successfully closed files after retrieving prior covariance matrices")
-
-        # Once we have the matrices, we can start to make the full covariance matrix, and then decompose it
-
-        fullcov = np.zeros((self.nparams, self.nparams), float)
-
-        nocn = cov_ocn.shape[0]
-        nbio = cov_bio.shape[0]
-
-        fullcov[0:nbio, 0:nbio] = cov_bio
-        fullcov[nbio:nbio + nocn, nbio:nbio + nocn] = 0.16 * np.dot(cov_ocn, cov_ocn)
-        fullcov[nocn + nbio, nocn + nbio] = 1.e-10
-
-
-        try:
-            plt.imshow(fullcov)
-            plt.colorbar()
-            plt.savefig('fullcovariancematrix.png')
-            plt.close('all')
-            logging.debug("Covariance matrix visualized for inspection")
-        except:
-            pass
-
-        return fullcov
-
-
-################### End Class CtStateVector ###################
-
-
-if __name__ == "__main__":
-
-
-    from da.tools.initexit import StartLogger 
-    from da.tools.pipeline import JobStart
-
-
-    sys.path.append(os.getcwd())
-
-    opts = ['-v']
-    args = {'rc':'da.rc', 'logfile':'da_initexit.log', 'jobrcfilename':'test.rc'}
-
-    StartLogger()
-
-    DaCycle = JobStart(opts, args)
-    DaCycle.Initialize()
-    StateVector = CtStateVector()
-    StateVector.Initialize()
-
-    for n in range(StateVector.nlag):
-        cov = StateVector.GetCovariance(DaCycle['time.start'])
-        StateVector.MakeNewEnsemble(n + 1, cov)
-
-    StateVector.Propagate()
-    
-    filename = os.path.join(DaCycle['dir.output'], 'savestate.nc')
-    StateVector.WriteToFile(filename)
-    StateVector.ReadFromFile(filename)
-
diff --git a/da/ct/tools.py b/da/ct/tools.py
deleted file mode 100755
index 20cb223b8c4aa8d2930d9c070bbce845bcad5c1f..0000000000000000000000000000000000000000
--- a/da/ct/tools.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python
-# ct_tools.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 12 Feb 2009.
-
-"""
-import os
-import sys
-import logging
-import datetime
-
-identifier = 'CarbonTracker CO2'
-
-def StateToGrid(values,regionmap,reverse=False,avg=False):
-    """ 
-    This method converts parameters from a CarbonTracker StateVector object to a gridded map of linear multiplication values. These
-    can subsequently be used in the transport model code to multiply/manipulate fluxes
-
-    """
-    import numpy as np
-    nregions  = regionmap.max()
-
-    # dictionary for region <-> map conversions
-
-    regs={}
-    for r in np.arange(1,nregions+1):
-         sel=(regionmap.flat == r).nonzero()
-         if len(sel[0])>0: regs[r]=sel
-
-    regionselect=regs
-
-    if reverse:
-
-        """ project 1x1 degree map onto ecoregions """
-
-        result=np.zeros(nregions,float)
-        for k,v in regionselect.iteritems():
-            if avg: 
-                result[k-1]=values.ravel().take(v).mean()
-            else : 
-                result[k-1]=values.ravel().take(v).sum()
-        return result
-
-    else:
-
-        """ project ecoregion properties onto 1x1 degree map """
-
-        result=np.zeros((180,360,),float)
-        for k,v in regionselect.iteritems():
-            result.put(v,values[k-1])
-
-        return result
-
-if __name__ == "__main__":
-
-    sys.path.append('../../')
-
-    import os
-    import sys
-    from da.tools.general import StartLogger 
-    from da.tools.initexit import CycleControl 
-    import numpy as np
-    import da.tools.rc as rc 
-    from pylab import *
-
-    opts = ['-v']
-    args = {'rc':'da.rc','logfile':'da_initexit.log','jobrcfilename':'test.rc'}
-
-    StartLogger()
-    DaCycle = CycleControl(opts,args)
-
-    DaCycle.Initialize()
-    print DaCycle
-
-    a=arange(240)+100
-
-    b = StateToGrid(DaCycle.DaSystem,a)
-
-    figure()
-    imshow(b)
-    colorbar()
-    print b.max()
-
-    c = StateToGrid(DaCycle.DaSystem,b,reverse=True,avg=True)
-
-    figure()
-    print c.max()
-
-    plot(a,label='original') 
-    plot(c,label='reconstructed') 
-    legend(loc=0)
-
-
-    show()
-
-    
-
-
-
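Since the ``__main__`` block above depends on a full CTDAS installation, a minimal, self-contained sketch of how ``StateToGrid`` behaves may be clearer. The region map below is a synthetic stand-in (four latitude bands), not a real CarbonTracker ecoregion map, and the calls assume the ``StateToGrid`` function defined above is importable::

    import numpy as np

    # synthetic 1x1 degree region map: four bands of 45 degrees latitude each
    regionmap = np.repeat(np.arange(1, 5), 45 * 360).reshape(180, 360)

    values    = np.array([1.0, 2.0, 3.0, 4.0])                           # one scaling factor per region
    gridded   = StateToGrid(values, regionmap)                           # project regions onto the 1x1 grid
    recovered = StateToGrid(gridded, regionmap, reverse=True, avg=True)  # and back again

    assert np.allclose(values, recovered)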
diff --git a/da/ctgridded/dasystem.py b/da/ctgridded/dasystem.py
deleted file mode 100755
index ad75f4f4db024d48e1b2f22e0b493cc302bb05b1..0000000000000000000000000000000000000000
--- a/da/ctgridded/dasystem.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# control.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 26 Aug 2010.
-
-"""
-
-import os
-import sys
-import logging
-import datetime
-
-################### Begin Class CtDaSystem ###################
-
-from da.baseclasses.dasystem import DaSystem
-
-class CtGriddedDaSystem(DaSystem):
-    """ Information on the data assimilation system used. This is normally an rc-file with settings.
-    """
-
-    def __init__(self,rcfilename):
-        """
-        Initialization occurs from passed rc-file name, items in the rc-file will be added
-        to the dictionary
-        """
-
-        self.Identifier     = 'CarbonTracker Gridded CO2'    # the identifier gives the name of this DA system
-
-        self.LoadRc(rcfilename)
-
-        msg  = 'Data Assimilation System initialized: %s'%self.Identifier ; logging.debug(msg)
-
-    def Validate(self):
-        """ 
-        Validate the contents of the rc-file given a dictionary of required keys
-        """
-
-        needed_rc_items = ['obs.input.dir',
-                           'obs.input.fname',
-                           'ocn.covariance',
-                           'nparameters',
-                           'deltaco2.prefix',
-                           'regtype']
-
-
-        for k,v in self.iteritems():
-            if v == 'True' : self[k] = True
-            if v == 'False': self[k] = False
-
-        for key in needed_rc_items:
-
-            if not self.has_key(key):
-                status,msg = ( False,'Missing a required value in rc-file : %s' % key)
-                logging.error(msg)
-                raise IOError,msg
-
-        status,msg = ( True,'DA System Info settings have been validated successfully' )  ; logging.debug(msg)
-
-        return None
-################### End Class CtDaSystem ###################
-
-
-if __name__ == "__main__":
-    pass
diff --git a/da/ctgridded/statevector.py b/da/ctgridded/statevector.py
deleted file mode 100755
index 651295e2b552e43e89bfb3a5f0886c24cbbde2f0..0000000000000000000000000000000000000000
--- a/da/ctgridded/statevector.py
+++ /dev/null
@@ -1,271 +0,0 @@
-#!/usr/bin/env python
-# ct_statevector_tools.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 28 Jul 2010.
-
-"""
-
-import os
-import sys
-sys.path.append(os.getcwd())
-sys.path.append('../../')
-
-
-import logging
-import datetime
-from da.baseclasses.statevector import EnsembleMember, StateVector
-import numpy as np
-
-identifier = 'CarbonTracker Gridded Statevector '
-version    = '0.0'
-
-################### Begin Class CtStateVector ###################
-
-class CtGriddedStateVector(StateVector):
-    """ This is a StateVector object for CarbonTracker. It has a private method to make new ensemble members """
-
-    def getid(self):
-        return identifier
-
-    def getversion(self):
-        return version
-
-    def GetCovariance(self,date):
-        """ Read the prior covariance matrices for the biosphere ecoregions and for the ocean (the ocean file name is
-            date-dependent) from the files specified in the rc-file, and return them as a list of matrices. The
-            deviations are later drawn per matrix in MakeNewEnsemble to keep the computational burden small.
-        """
-
-        import da.tools.io4 as io
-        try:
-            import matplotlib.pyplot as plt
-        except ImportError:
-            pass
-
-        # Get the needed matrices from the specified covariance files
-
-        file_ocn_cov = self.DaCycle.DaSystem['ocn.covariance'] 
-
-        cov_files    = os.listdir(self.DaCycle.DaSystem['bio.cov.dir'])
-
-        cov_files    = [os.path.join(self.DaCycle.DaSystem['bio.cov.dir'],f) for f in cov_files if self.DaCycle.DaSystem['bio.cov.prefix'] in f]
-
-        msg = "Found %d covariances to use for biosphere" % len(cov_files) ; logging.debug(msg)
-
-        # replace YYYY.MM in the ocean covariance file string
-
-        file_ocn_cov = file_ocn_cov.replace('2000.01',date.strftime('%Y.%m'))
-
-        cov_files.append(file_ocn_cov)
-
-        covariancematrixlist = []
-        for file in cov_files:
-
-            if not os.path.exists(file):
-
-                msg = "Cannot find the specified file %s" % file ; logging.error(msg)
-                raise IOError,msg
-            else:
-
-                msg = "Using covariance file: %s" % file ; logging.debug(msg)
-
-            f   = io.CT_Read(file,'read')
-
-            if 'pco2' in file: 
-                cov_ocn     = f.GetVariable('CORMAT')
-                cov         = 0.16 *np.dot(cov_ocn,cov_ocn)
-            else: 
-                cov         = f.GetVariable('covariance')
-
-            dummy   = f.close()
-
-            covariancematrixlist.append(cov)
-
-        dummy   = logging.debug("Successfully closed files after retrieving prior covariance matrices")
-
-        # Once we have the matrices, we can start to make the full covariance matrix, and then decompose it
-
-        return covariancematrixlist
-
-    def MakeNewEnsemble(self,lag, covariancematrixlist = [None]):
-        """ 
-        :param lag: an integer indicating the time step in the lag order
-        :param covariancematrixlist: a list of matrices specifying the covariance distribution to draw from
-        :rtype: None
-    
-        Make a new ensemble, the attribute lag refers to the position in the state vector. 
-        Note that lag=1 means an index of 0 in python, hence the notation lag-1 in the indexing below.
-        The argument is thus referring to the lagged state vector as [1,2,3,4,5,..., nlag]
-
-        The covariance list object to be passed holds a list of matrices with a total number of dimensions [nparams, nparams], which is
-        used to draw ensemblemembers from. Each draw is done on a matrix from the list, to make the computational burden smaller when
-        the StateVector nparams becomes very large.
-
-        """    
-        try:
-            import matplotlib.pyplot as plt
-        except ImportError:
-            pass
-
-        if not isinstance(covariancematrixlist,list):
-            msg = "The covariance matrix or matrices must be passed as a list of array objects, exiting..." ; logging.error(msg)
-            raise ValueError
-
-        # Check dimensions of covariance matrix list, must add up to nparams
-
-        dims = 1  # start from 1.0 to account for the last parameter that scales Ice+Non-optimized, we have no covariance matrix for this though
-
-        for matrix in covariancematrixlist: dims += matrix.shape[0]
-
-        if dims != self.nparams:
-            msg = "The total dimension of the covariance matrices passed (%d) does not add up to the prescribed nparams (%d), exiting..."%(dims,self.nparams) ; logging.error(msg)
-            raise ValueError
-
-        # Loop over the list of covariance matrices and create a matrix of (nparams,nmembers) with the deviations
-
-        istart      = 0
-        istop       = 0
-        dof         = 0.0
-        dev_matrix  = np.zeros((self.nparams,self.nmembers,),'float')
-        randstate    = np.random.get_state()
-
-        for matrix in covariancematrixlist:
-
-
-            # Make a cholesky decomposition of the covariance matrix
-
-            U,s,Vh  = np.linalg.svd(matrix)
-            dof     += np.sum(s)**2/sum(s**2)
-            try:
-                C       = np.linalg.cholesky(matrix)
-            except np.linalg.linalg.LinAlgError,err:
-                msg =   'Cholesky decomposition has failed '                    ; logging.error(msg)
-                msg =   'For a matrix of dimensions: %d'%matrix.shape[0]        ; logging.error(msg)
-                logging.debug(err)
-                raise np.linalg.linalg.LinAlgError
-
-
-            # Draw nmembers instances of this distribution
-
-            npoints      = matrix.shape[0]
-
-            istop        = istop+npoints
-
-            for member in range(1,self.nmembers):
-                rands        = np.random.randn(npoints)
-                deviations   = np.dot(C,rands)
-                dev_matrix[istart:istop,member-1] = deviations
-                dev_matrix[istop,member-1] = 1.e-10*np.random.randn()
-
-            #cov2 = np.dot(dev_matrix[istart:istop,:],np.transpose(dev_matrix[istart:istop,:])) / (self.nmembers-1)
-            #print matrix.sum(),cov2.sum(),abs(matrix.diagonal()-cov2.diagonal()).max(), matrix.shape,cov2.shape
-
-            istart      = istart + npoints
-
-        msg =   'Successfully constructed a deviation matrix from covariance structure' ; logging.debug(msg)
-        msg =   'Appr. degrees of freedom in full covariance matrix is %s'%(int(dof))  ; logging.info(msg)
-
-        # Now fill the ensemble members with the deviations we have just created
-
-
-        # Create mean values 
-
-        NewMean                     = np.ones(self.nparams,float) # standard value for a new time step is 1.0
-
-        # If this is not the start of the filter, average previous two optimized steps into the mix
-
-        if lag == self.nlag and self.nlag >= 3:
-            NewMean                     += self.EnsembleMembers[lag-2][0].ParameterValues + \
-                                           self.EnsembleMembers[lag-3][0].ParameterValues 
-            NewMean                      = NewMean/3.0
-
-        # Create the first ensemble member with a deviation of 0.0 and add to list
-
-        NewMember                   = self.GetNewMember(0)
-        NewMember.ParameterValues   = NewMean.flatten()  # no deviations
-        dummy                       = self.EnsembleMembers[lag-1].append(NewMember)
-
-        # Create members 1:nmembers and add to EnsembleMembers list
-
-        for member in range(1,self.nmembers):
-
-            NewMember                   = self.GetNewMember(member)
-            NewMember.ParameterValues   = dev_matrix[:,member-1]
-            dummy                       = self.EnsembleMembers[lag-1].append(NewMember)
-
-        msg =   '%d new ensemble members were added to the state vector # %d'%(self.nmembers,lag)  ; logging.debug(msg)
-
-
-
-
-
-################### End Class CtStateVector ###################
-
-
-if __name__ == "__main__":
-
-    import os
-    import sys
-    from da.tools.initexit import StartLogger , ParseOptions
-    from da.tools.pipeline import JobStart
-    from da.tools.initexit import CycleControl
-    from da.ctgridded.dasystem import CtGriddedDaSystem 
-    import numpy as np
-
-    sys.path.append(os.getcwd())
-    sys.path.append('../../')
-
-    opts = ['-v']
-    args = {'rc':'../../dagridded.rc'}
-
-    StartLogger(level = logging.DEBUG)
-
-    DaCycle     = CycleControl(opts,args)
-
-    DaSystem    = CtGriddedDaSystem('../../da/rc/carbontrackergridded.rc')
-    StateVector = CtGriddedStateVector()
-
-    DaSystem.Initialize()
-    DaSystem.Validate()
-    DaCycle.DaSystem    = DaSystem
-    DaCycle.Initialize()
-    StateVector.DaCycle = DaCycle # also embed object in StateVector so it can access cycle information for I/O etc
-
-    dummy               = StateVector.Initialize()
-
-    for n in range(1):
-        cov   = StateVector.GetCovariance(DaCycle['time.start'])
-        dummy = StateVector.MakeNewEnsemble(n+1,cov)
-
-    #StateVector.Propagate()
-
-    #savedir         = './'
-    #filename        = os.path.join(savedir,'savestate.nc')
-    #dummy = StateVector.WriteToFile(filename)
-    #StateVector.ReadFromFile(filename)
-
-    StateVector.StateToTC(fluxvector=np.ones(StateVector.nparams) )
-
-    sys.exit(2)
-
-    members =  StateVector.EnsembleMembers[-1]
-
-    
-
-    for mem in members[0:1]:
-
-        mem.ParameterValues = np.arange(StateVector.nparams)+1.0
-        data                = StateVector.VectorToGrid(vectordata=mem.ParameterValues)
-        params              = StateVector.VectorToGrid(griddata=data, reverse=True, method = "minval")
-
-        tcparams            = StateVector.VectorToTC(mem.ParameterValues)
-
-    print (StateVector.gridmap-data).min()
-    print (StateVector.gridmap-data).max()
-    print (mem.ParameterValues-params).min()
-    print (mem.ParameterValues-params).max()
-
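The commented-out check inside ``MakeNewEnsemble`` hints at a useful sanity test: the sample covariance of the Cholesky draws should approach the prescribed matrix as the ensemble grows. A small, stand-alone sketch of that idea, using an arbitrary positive-definite matrix rather than a real CTDAS covariance file::

    import numpy as np

    np.random.seed(0)
    nmembers = 5000                                  # deliberately large, for a tight check
    A        = np.random.randn(10, 10)
    matrix   = np.dot(A, A.T) + 10.0 * np.eye(10)    # arbitrary positive-definite "covariance"

    C          = np.linalg.cholesky(matrix)
    deviations = np.dot(C, np.random.randn(10, nmembers))   # one column per ensemble member

    sample_cov = np.dot(deviations, deviations.T) / (nmembers - 1)
    print np.abs(matrix - sample_cov).max()          # small compared to the diagonal of matrix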
diff --git a/da/doc/Makefile b/da/doc/Makefile
deleted file mode 100644
index 4c501ee2bdb51292924be5024abbe4df4d2f5151..0000000000000000000000000000000000000000
--- a/da/doc/Makefile
+++ /dev/null
@@ -1,130 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build-2.6
-PAPER         =
-BUILDDIR      = build
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  text       to make text files"
-	@echo "  man        to make manual pages"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	-rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/CarbonTrackerDataAssimilationShell.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/CarbonTrackerDataAssimilationShell.qhc"
-
-devhelp:
-	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
-	@echo
-	@echo "Build finished."
-	@echo "To view the help file:"
-	@echo "# mkdir -p $$HOME/.local/share/devhelp/CarbonTrackerDataAssimilationShell"
-	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/CarbonTrackerDataAssimilationShell"
-	@echo "# devhelp"
-
-epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	@echo
-	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make' in that directory to run these through (pdf)latex" \
-	      "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through pdflatex..."
-	make -C $(BUILDDIR)/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
-	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
-	@echo
-	@echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
-	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
-	@echo
-	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/da/doc/source/_static/banner1.png b/da/doc/source/_static/banner1.png
deleted file mode 100644
index d505cf3edc998de70a54fac87cf735639554a004..0000000000000000000000000000000000000000
Binary files a/da/doc/source/_static/banner1.png and /dev/null differ
diff --git a/da/doc/source/_templates/layout.html b/da/doc/source/_templates/layout.html
deleted file mode 100644
index 15667a5d4dc7b877e22db86c24e1f43f12db75cb..0000000000000000000000000000000000000000
--- a/da/doc/source/_templates/layout.html
+++ /dev/null
@@ -1,7 +0,0 @@
-{% extends "!layout.html" %} 
-
-{% block header %} 
-<div style="background-color: white; text-align: left; padding: 10px 10px 15px 15px">
-<a href="#"><img src="_static/banner1.png" border="0" alt="CarbonTracker"/></a>
-</div>
-{% endblock %} 
diff --git a/da/doc/source/conf.py b/da/doc/source/conf.py
deleted file mode 100644
index 097333b4526d77dc09898f55fc7f7626bfbf6490..0000000000000000000000000000000000000000
--- a/da/doc/source/conf.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# CarbonTracker Data Assimilation Shell documentation build configuration file, created by
-# sphinx-quickstart on Sun Sep 26 13:39:23 2010.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(),'../../../')))
-print sys.path
-
-# -- General configuration -----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'CarbonTracker Data Assimilation Shell'
-copyright = u'2010, Wouter Peters'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '0.1'
-# The full version, including alpha/beta/rc tags.
-release = '0.1'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
-html_theme = 'sphinxdoc'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-html_title = 'CarbonTracker DAS'
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = './images/carbontracker.png'
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'CarbonTrackerDataAssimilationShelldoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
-  ('index', 'CarbonTrackerDataAssimilationShell.tex', u'CarbonTracker Data Assimilation Shell Documentation',
-   u'Wouter Peters', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output --------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'carbontrackerdataassimilationshell', u'CarbonTracker Data Assimilation Shell Documentation',
-     [u'Wouter Peters'], 1)
-]
diff --git a/da/doc/source/contents.rst b/da/doc/source/contents.rst
deleted file mode 100644
index 1d8f599a7c2773359f53a2c593a64504bf99ed4d..0000000000000000000000000000000000000000
--- a/da/doc/source/contents.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-.. _contents:
-
-Contents
-========
-
-.. toctree::
-   :maxdepth: 2
-
-   System Requirements <systemrequirements>
-   Installing CTDAS <installing>
-   CTDAS Overview <overview>
-   CTDAS Tutorial <tutorial>
-   CTDAS Documentation <documentation>
diff --git a/da/doc/source/cyclecontrol.rst b/da/doc/source/cyclecontrol.rst
deleted file mode 100644
index 45e67c6b5cbca3975a88d35330a1aaf45f037dfa..0000000000000000000000000000000000000000
--- a/da/doc/source/cyclecontrol.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-.. _cyclecontrol:
-
-Cycle Control
-======================
-.. automodule:: da.tools.initexit
diff --git a/da/doc/source/dasystem.rst b/da/doc/source/dasystem.rst
deleted file mode 100644
index 2b2d8f1493f6c2d90ef55603951ffb152c1706d4..0000000000000000000000000000000000000000
--- a/da/doc/source/dasystem.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-.. _dasystem:
-
-DaSystem
-======================
-.. automodule:: da.baseclasses.dasystem
diff --git a/da/doc/source/docdesign.rtf b/da/doc/source/docdesign.rtf
deleted file mode 100644
index d83f512dcda592e05d4bc302e91e55ad31df27a5..0000000000000000000000000000000000000000
--- a/da/doc/source/docdesign.rtf
+++ /dev/null
@@ -1,47 +0,0 @@
-{\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf320
-{\fonttbl\f0\fswiss\fcharset0 Helvetica;}
-{\colortbl;\red255\green255\blue255;}
-\paperw11900\paperh16840\margl1440\margr1440\vieww9000\viewh8400\viewkind0
-\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\ql\qnatural\pardirnatural
-
-\f0\fs24 \cf0 Design of CT Documentation\
-\
-\
-Welcome\
-System Requirements\
-Installation\
-\
-Contents\
-\
-Overview of CTDAS\
-	Design philosophy\
-	Extensions\
-\
-Tutorial\
-	Getting Started\
-		Creating rc-files\
-		Creating a run script\
-		Setting up your platform\
-	Running an inversion\
-		Starting a new job\
-		Restart from a crash\
-		Extend an existing simulation\
-	Modifying the system\
-		Defining your own classes\
-		Creating a new pipeline\
-\
-Reference/Documentation\
-	Cycle Control\
-	Platforms\
-	DA System\
-	Observations\
-	State Vectors\
-	Optimization\
-	Observation Operator\
-	Pipeline\
-	Baseclasses\
-	I/O\
-\
-Modules, Classes, Functions\
-\
-	}
\ No newline at end of file
diff --git a/da/doc/source/documentation.rst b/da/doc/source/documentation.rst
deleted file mode 100644
index c68db8e8d1deeeaa2a80fc62373c7dcdc2d1fec0..0000000000000000000000000000000000000000
--- a/da/doc/source/documentation.rst
+++ /dev/null
@@ -1,58 +0,0 @@
-.. _documentation:
-
-Documentation
-======================
-
-Documentation exists for both the baseclasses that form the basic building blocks of CTDAS,
-and for the specific implementation of these for the CarbonTracker CO2 system. 
-The specific classes *always* inherit from the baseclasses. 
-
-Baseclasses
------------
-
-.. toctree::
-   :maxdepth: 2
-   
-   The CycleControl Object <cyclecontrol>
-   The DASystem Object <dasystem>
-   The PlatForm Object <platform>
-   The Observations Object <observations>
-   The ObservationOperator Object <observationoperator>
-   The StateVector and EnsembleMember Objects <statevector>
-   The Optimizer Object <optimizer>
-
-Platform classes 
-----------------
-
-.. toctree::
-   :maxdepth: 2
-
-   The maunaloa platform (MacOSX at Wageningen University) <maunaloa>
-   The jet platform (Linux system at NOAA ESRL)  <jet>
-
-Pipelines
----------
-
-.. toctree::
-   :maxdepth: 2
-
-   Inverse pipeline <pipeline>
-
-
-CarbonTracker CO2 classes
--------------------------
-
-.. toctree::
-   :maxdepth: 2
-   
-   The CtObservations Object <ctobservations>
-   The TM5ObservationOperator Object <tm5observationoperator>
-   The CtStateVector and EnsembleMember Objects <ctstatevector>
-   The CtOptimizer Object <ctoptimizer>
-
-The Analysis Suite 
-------------------
-
-.. toctree::
-   :maxdepth: 2
-   
diff --git a/da/doc/source/images/carbontracker.png b/da/doc/source/images/carbontracker.png
deleted file mode 100644
index e6da1e1af8e9f84166f8c5518014f351e66efd31..0000000000000000000000000000000000000000
Binary files a/da/doc/source/images/carbontracker.png and /dev/null differ
diff --git a/da/doc/source/index.rst b/da/doc/source/index.rst
deleted file mode 100644
index 05e8ca132f8f0c7e2f13a57a36741aaf472e33f2..0000000000000000000000000000000000000000
--- a/da/doc/source/index.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-.. CarbonTracker Data Assimilation Shell documentation master file, created by
-   sphinx-quickstart on Sun Sep 26 13:39:23 2010.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-Welcome to the CTDAS documentation!
-===================================
-
-This is the starting page for the documentation of the CarbonTracker Data Assimilation Shell (CTDAS).
-
-.. sidebar:: What is CTDAS?
-
-    CTDAS is short for "CarbonTracker Data Assimilation Shell". 
-    This is the implementation of an extendible data assimilation framework for CarbonTracker,
-    developed by NOAA ESRL & Wageningen University, in close cooperation with many partners around the world.
-
-The aim of the CTDAS system is to facilitate the use of CarbonTracker, and to 
-foster its development by its many international partners. At its most basic, the CTDAS is a simple 
-control system for CarbonTracker that deals with the running, optimization, analysis, and time stepping of the system. 
-For advanced users, the CTDAS provides an easy way to extend or modify the system by introducing new configurations 
-for the state vector, for the observations, or even for the transport model or optimization method.
-
-The CarbonTracker development team welcomes suggestions and feedback on the CTDAS system, and its documentation.
-
-Follow the link below to start navigating, or use the "next topic" button in the right-hand-side menu.    
-
-.. toctree::
-   :maxdepth: 1
-
-   List of contents <contents>
diff --git a/da/doc/source/installing.rst b/da/doc/source/installing.rst
deleted file mode 100644
index 2dcb13aad33642b66d71f8770b9650d12ab5e5a4..0000000000000000000000000000000000000000
--- a/da/doc/source/installing.rst
+++ /dev/null
@@ -1,115 +0,0 @@
-.. _installing:
-
-Installing CTDAS
-======================
-
-.. note:: The current documentation assumes you will run CTDAS with the TM5 transport model. Although this is
-          not an absolute requirement, it is advised to start with TM5 before modifying the system to use
-          another transport model
-
-To install a fully working CTDAS system on a new system is a two step process:
-
-   1. Install and configure the TM5 transport model
-   2. Install and configure the CTDAS shell
-
-The first step is needed because the Python-based CTDAS shell relies on the FORTRAN-based TM5 model to perform transport of tracers given a set of fluxes. Typically, the TM5 model needs to be compiled and tested only once, and is then available for subsequent data assimilation experiments. In principle, any transport model can take the place of TM5 to perform this service, but no other transport models are currently supported. Although the current TM5 CTDAS code only handles the transport and fluxes of CO2, even a novice TM5 user can make a new project to handle the transport of other tracers (CH4, SF6), or multiple tracers (CO2 + CO + 13CO2).
-
-Installing TM5
---------------
-
-The TM5 transport model is freely available to everyone who consents to the terms of use of the model, and thereby becomes a part of the TM5 user community.
-
-.. important::  
-    **Collaboration protocol TM5 model**
-
-    *Rationale*
-
-    The two-way nested TM5 global chemistry transport model has over the past decades been developed by a consortium of persons 
-    and organizations, namely IMAU, KNMI, JRC, and more recently also NOAA. Whereas there is no formal property right on 
-    TM5, and it is in general recognized that an increased user group will be beneficial for the scientific development and 
-    credibility of TM5, there may also arise potential problems and conflicts of interest. These may refer to insufficient 
-    communication, insufficient acknowledgement of intellectual efforts, duplication of efforts, the use of the same model 
-    in competing projects, insufficient feed-back of users on potential problems, publishing scientific results without 
-    clarifying what the difference with previous model versions used in previous calculations was, thereby confusing the 
-    outside world as to which result to use/or to believe.
-
-    *Rules of conduct*
-
-    The user of TM5 agrees to:
-
-    * To promptly report on bugs or problems in TM5 via e-mail or web-forum.
-    * To acknowledge the principal authors of TM5 (e.g. Krol and Segers for the overall models, and several others for 
-      sub-modules) in scientific papers or reports. In case that the contribution of a TM5 contributor was essential 
-      for the publication co-authorship should be offered.
-    * To include in a scientific publication a brief discussion of previous results on a certain topic and to explain 
-      the differences.
-    * To participate in the bi-annual TM meetings; or have at least one representative of the group to represent him/her.
-    * To inform at the TM meeting (or before) the TM5 user group of projects in preparation, submitted and in progress, 
-      in order to avoid internal competition.
-    * To introduce new users into this protocol, 
-    * To closely follow and give scientific and technical support to collaborations with external groups that involve 
-      the remote use of TM5, in order to avoid damaging inappropriate use of the model.
-
-To become a member of the TM5 user community, please contact `Wouter Peters at Wageningen University <http://www.maq.wur.nl/UK/Employees/WouterPeters/>`_. 
-
-As a member of the TM5 community you gain access to the TM5 WiKi pages and the TM5 subversion server that allow you to complete an installation of the latest version of the model. The detailed instructions (members only!) are `here <https://www.surfgroepen.nl/sites/tm/Shared%20Wiki/Automatically%20installing%20TM5%20from%20subversion.aspx>`_. Note that in order to run the TM5 CTDAS project (i.e., the fortran code that performs the transport of the CO2 tracer within the overall CTDAS system), you also need to compile extra libraries supported in TM5. Specifically:
-
-    * HDF4 (depends on JPEG, SZIP), http://www.hdfgroup.org/products/hdf4/
-    * HDF5 (depends on JPEG, SZIP), http://www.hdfgroup.org/HDF5/
-    * NetCDF4 (depends on HDF5, UDUNITS, SZIP), http://www.unidata.ucar.edu/software/netcdf/
-    * MPI
-
-Each of the first three libraries has to be built with FORTRAN support; in addition to a regular build, HDF5 and NetCDF4 also have to be installed in combination with MPI to support parallel I/O. Also, make sure that HDF4 is compiled without netcdf support. The sets of flags to control this are:
-    * for HDF4::
-
-          ./configure \
-              --disable-netcdf \
-              --enable-fortran \
-              --with-jpeg=${JPEG_HOME} \
-              --with-szlib=${SZIP_HOME} \
-              --disable-shared \
-              --with-pic
-        
-    * for HDF5::
-
-            ./configure \
-                --enable-fortran \
-                --with-szlib=${SZIP_HOME} \
-                --disable-shared \
-                --with-pic 
-
-    * for NetCDF4::
-
-            ./configure \
-                --enable-netcdf-4 \
-                --enable-f90 \
-                --disable-dap \
-                --with-hdf5=${HDF5_HOME} \
-                --with-szlib=${SZIP_HOME} \
-                --disable-shared \
-                --with-pic 
-
-And when compiling with the MPI compilers, also add the flag::
-
-               --enable-parallel   (for HDF5 compiling)
-               --enable-parallel-tests (for NetCDF4 compiling)
-
-At this point, I advise you to continue to the :ref:`tutorial Chapter 0 <tut_chapter0>` for further instructions.
-
-
-Installing the CTDAS shell
---------------------------
-
-.. note::
-    The CTDAS shell is currently not yet available in the open source domain, but is hosted on the password protected server at Wageningen University. Access can be granted to those interested in the project by sending an email to `Wouter Peters at Wageningen University <http://www.maq.wur.nl/UK/Employees/WouterPeters/>`_. In the future, we will release CTDAS as a typical python package, possibly through the `Cheeseshop <http://pypi.python.org>`_. 
-
-Once you have access to the subversion server, please check out the latest source code to a directory of your choice using::
-
-   svn checkout https://maunaloa.wur.nl/subversion/das/ct/trunk ./
-
-Accept any security certificate permanently (p) and allow the server to store your password insecurely if prompted.
-
-*The CTDAS shell is now installed and ready to be configured!*
-
-At this point, I advise you to continue to the :ref:`tutorial Chapter 1 <tut_chapter1>` for further instructions. 
-
diff --git a/da/doc/source/jet.rst b/da/doc/source/jet.rst
deleted file mode 100644
index 53f426e91df8601d480bdcd9dcae9d96e9744bb9..0000000000000000000000000000000000000000
--- a/da/doc/source/jet.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _jet:
-
-The JET PlatForm
-======================
-
-.. automodule:: da.platform.jet
diff --git a/da/doc/source/maunaloa.rst b/da/doc/source/maunaloa.rst
deleted file mode 100644
index 01453daae5cca9e38a109bb37639b2164ba04db3..0000000000000000000000000000000000000000
--- a/da/doc/source/maunaloa.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _maunaloa:
-
-The Maunaloa PlatForm
-======================
-
-.. automodule:: da.platform.maunaloa
diff --git a/da/doc/source/mysettings.rst b/da/doc/source/mysettings.rst
deleted file mode 100644
index 785be098e675ae340f5fa52427ee286c94db7bd4..0000000000000000000000000000000000000000
--- a/da/doc/source/mysettings.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _mysettings:
-
-Personal Settings Module
-========================
-
-.. automodule:: da.analysis.mysettings
diff --git a/da/doc/source/observationoperator.rst b/da/doc/source/observationoperator.rst
deleted file mode 100644
index 74e86bcc423e131a54f70b701d60f368d274ff48..0000000000000000000000000000000000000000
--- a/da/doc/source/observationoperator.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-.. _statevector:
-
-The Observation Operator Object
-==========================================
-
-.. automodule:: da.baseclasses.observationoperator
-
-
-
-
diff --git a/da/doc/source/observations.rst b/da/doc/source/observations.rst
deleted file mode 100644
index a47bbc0ac222e8aa5975608e6cb018ba6d276e3d..0000000000000000000000000000000000000000
--- a/da/doc/source/observations.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-.. _observations:
-
-The Observation Object
-======================
-
-.. automodule:: da.baseclasses.obs
-
diff --git a/da/doc/source/optimizer.rst b/da/doc/source/optimizer.rst
deleted file mode 100644
index 4632e5fe4db3ce0b3d2b3b549db3c3e7d98e205e..0000000000000000000000000000000000000000
--- a/da/doc/source/optimizer.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-.. _optimizer:
-
-The Optimizer Object
-==========================================
-
-.. automodule:: da.baseclasses.optimizer
-
-
-
-
diff --git a/da/doc/source/overview.rst b/da/doc/source/overview.rst
deleted file mode 100644
index b035f4126e09fd06666cecd024e26bd6bc3adfef..0000000000000000000000000000000000000000
--- a/da/doc/source/overview.rst
+++ /dev/null
@@ -1,52 +0,0 @@
-.. _overview:
-
-.. index:: overview, philosophy, development centers
-
-Overview
-========
-
-This page gives a brief overview of CTDAS, with the intention of providing background information for (potential) new users. After reading the information below we recommend contacting one of the CTDAS development centers (Wageningen University and NOAA ESRL), or proceeding to the :doc:`tutorial`.
-
-What is CTDAS?
---------------
-
-CTDAS is short for "CarbonTracker Data Assimilation Shell".  
-This is the implementation of an extendible data assimilation framework for CarbonTracker,
-developed by NOAA ESRL & Wageningen University, in close cooperation with many partners around the world.
-
-The aim of the CTDAS system is to facilitate the use of CarbonTracker, and to 
-foster its development by its many international partners. 
-
-
-How can you use CTDAS?
-----------------------
-At its most basic, the CTDAS is a simple 
-control system for CarbonTracker that deals with the running, optimization, analysis, and time stepping of the system. 
-For advanced users, the CTDAS provides an easy way to extend or modify the system by introducing new configurations 
-for the state vector, for the observations, or even for the transport model or optimization method.
-
-CTDAS Design philosophy
------------------------
-
-CTDAS is implemented in Python. Each component of the data assimilation system is written as a separate Python class, and these 
-classes are combined in a pipeline. Separate pipelines exist for an inverse simulation and for a forward run. Four classes compose the core of 
-the system: Observations, StateVector, ObservationOperator, and Optimizer. These are controlled by a CycleControl object, which 
-itself holds information from class PlatForm and class DaSystem. For each of these seven components, a "baseclass" exists that 
-describes the basic layout and the mandatory methods that each class needs to have. A specific implementation of a baseclass 
-"inherits" this basic behavior, and then extends or overwrites the methods. 
-
-As a typical example, in the module :mod:`~da.baseclasses.observationoperator`, a baseclass :class:`~da.baseclasses.observationoperator.ObservationOperator` is a nearly empty object that contains methods to :meth:`~da.baseclasses.observationoperator.ObservationOperator.Initialize()` the object, to :meth:`~da.baseclasses.observationoperator.ObservationOperator.Validate()` the input for it, to :meth:`~da.baseclasses.observationoperator.ObservationOperator.Run()` the operator, and to :meth:`~da.baseclasses.observationoperator.ObservationOperator.SaveData()` the output that is needed for the next cycle. The class :class:`~da.tm5.observationoperator.TM5ObservationOperator` is derived from this baseclass, and has a new implementation of each of these methods. When they are called in the pipeline, a TM5 model run is prepared and executed by the methods of the class, returning a set of samples for the optimizer to use in the 
-minimum least squares method.
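A schematic of what such a derived class looks like; only the baseclass module and the four method names come from the text above, the bodies are placeholders and not part of the actual TM5 implementation::

    from da.baseclasses.observationoperator import ObservationOperator

    class MyObservationOperator(ObservationOperator):
        """ Hypothetical transport model wrapper that overrides the baseclass methods. """

        def Initialize(self):
            pass   # e.g. stage executables, rc-files, and input fluxes for this cycle

        def Validate(self):
            pass   # e.g. check that all required input files are present

        def Run(self):
            pass   # e.g. launch the transport model and collect simulated observations

        def SaveData(self):
            pass   # e.g. store the restart files needed by the next cycle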
-
-This design philosophy makes the pipeline completely independent of the specific implementation. It simply calls methods from the classes it receives in a given order, and ensures that the flow of time and data works. The pipeline does not know (or need to know) whether it is running a gridded inversion with satellite data and the TM5 model without zoom, or whether it is running a methane inversion with TM3. The implementation is thus in the hands of the user.
-
-Extending CTDAS
----------------
-To extend CTDAS, one needs to write new classes that inherit from one of the baseclasses, and have specific functionality implemented under the methods called from the pipeline. This can be a very simple or a very hard task, depending on which functionality you want to add or alter.
-
-For instance, to make a new StateVector for your assimilation system with a different number of unknowns and different covariance between them, it would suffice to make a new class "MyStateVector" that inherits everything from the :class:`~da.baseclasses.statevector.StateVector` class, and then write one specific method to replace the standard :meth:`~da.baseclasses.statevector.StateVector.GetCovariance()` method.
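-
-In outline, such an extension could look as follows (a hypothetical sketch; the argument list of the real ``GetCovariance`` method may differ)::
-
-    from da.baseclasses.statevector import StateVector
-
-    class MyStateVector(StateVector):
-        def GetCovariance(self):
-            # return a prior covariance matrix suited to your own
-            # number of unknowns and their correlation structure
-            pass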
-
-For more specific instructions and examples, see the :ref:`Tutorial`.
-
-
-
diff --git a/da/doc/source/pipeline.rst b/da/doc/source/pipeline.rst
deleted file mode 100644
index 02a20531cf4908ddec7b0e514b0468b9ba7ef57f..0000000000000000000000000000000000000000
--- a/da/doc/source/pipeline.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-.. _pipeline:
-
-The Inverse Pipeline
-====================
-
-.. automodule:: da.tools.pipeline
-
diff --git a/da/doc/source/platform.rst b/da/doc/source/platform.rst
deleted file mode 100644
index 1eab80d66004fbec0244d856bf87c8032f8f3c78..0000000000000000000000000000000000000000
--- a/da/doc/source/platform.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _platform:
-
-PlatForm
-======================
-
-.. automodule:: da.baseclasses.platform
diff --git a/da/doc/source/statevector.rst b/da/doc/source/statevector.rst
deleted file mode 100644
index d9e3e9e7f8229460f8d522c32df5a89dea196608..0000000000000000000000000000000000000000
--- a/da/doc/source/statevector.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-.. _statevector:
-
-The StateVector and EnsembleMember Object
-==========================================
-
-.. automodule:: da.baseclasses.statevector
-
-
-
-
diff --git a/da/doc/source/systemrequirements.rst b/da/doc/source/systemrequirements.rst
deleted file mode 100644
index 5d5928a4aa587c633263224299f771424c4ad685..0000000000000000000000000000000000000000
--- a/da/doc/source/systemrequirements.rst
+++ /dev/null
@@ -1,64 +0,0 @@
-.. _systemrequirements:
-
-System Requirements
-======================
-
-The CarbonTracker DAS is programmed in python and uses its built-in functionality for many of its tasks.
-Users of CTDAS are required to have a python installation on their system, with a small set of 
-open source add-ons. The minimum requirements are listed below, categorized by functionality.
-
-Getting the CTDAS code
-----------------------
-
-* A working ``subversion`` (SVN) installation is needed. To check your system, type::
-
-     $ svn --version
-
-  If the system returns an error, or a version < 1.5.0, please obtain svn from <http://subversion.tigris.org/>.
-
-
-Running CTDAS 
--------------
-
-* A ``python2.4`` or later installation.  
-
-.. note:: ``python3`` is not supported; you need a python2.x version. To check your python version, type::
-
-    $ python --version
-
-* The python module ``numpy``, not included with a standard installation. You can obtain numpy from <http://numpy.scipy.org>
-
-* The python module ``netCDF4`` created by Jeff Whitaker. This package is freely available at <http://code.google.com/p/netcdf4-python/>
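-
-A quick way to verify that both modules can be imported is to type (the exact output will depend on the versions installed)::
-
-    $ python -c "import numpy, netCDF4; print numpy.__version__, netCDF4.__version__"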
-
-Atmospheric Transport
----------------------
-
-An important component of the CTDAS system is the observation operator, usually in the form of an atmospheric
-transport model. This model is a stand-alone piece of code that can subsample your state vector, and return a 
-set of observations. **You therefore require an atmospheric transport model that can run independently on your 
-platform**. All examples on these pages assume you have access to the TM5 transport model.
-
-.. important:: If you do not currently have access to such a transport model, or have no resources to run such a model
-   for long periods of time and for many ensemble configurations, you have probably stumbled onto this page not
-   understanding exactly what CTDAS is. We refer you to the :ref:`overview` for a more elaborate description.
-
-The TM5 transport model requires:
-
-    * Fortran 90
-    * MPI, HDF4, HDF5, NetCDF4, LAPACK libraries
-    * parallel computing capabilities (16-100 CPUs recommended)
-    * Meteorological driver data (260 Gb of storage for each year of input data)
-
-Analysis of results
--------------------
-
-Although many packages can be used to perform analyses (IDL, MatLAB, GRADS, ...), we have included standard routines for python. The recommended system configuration for these is:
-
-    * python 2.5 or higher <http://www.python.org/>
-    * numpy <http://numpy.scipy.org>
-    * matplotlib <http://matplotlib.sourceforge.net/>
-    * basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>
-    * netCDF4 <http://code.google.com/p/netcdf4-python/>
-
-
-
-
diff --git a/da/doc/source/tut_chapter0.rst b/da/doc/source/tut_chapter0.rst
deleted file mode 100644
index a5275dd912018b630488e9d749e68c6536373830..0000000000000000000000000000000000000000
--- a/da/doc/source/tut_chapter0.rst
+++ /dev/null
@@ -1,106 +0,0 @@
-.. _tut_chapter0:
-
-Chapter 0: Installing the transport model code
-----------------------------------------------------
-
-**In which you will learn to setup the TM5 model so it can be used within CTDAS**
-
-0.1 Installing the TM5 base trunk with pycasso
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-We recommend testing a base TM5 version first, to make sure that all libraries compile and work. The instructions for the current trunk of TM5 are as follows (note that there might be minor changes as the TM5 trunk evolves):
-
-Make a clean directory on your machine to hold TM5 (and later also CTDAS). From that directory, grab the TM5 install file::
-
-  svn export https://svn.knmi.nl/svn/TM5/install_tm5 
-
-Run the install script::
-
-  ./install_tm5
-
-You minimally need to check out the following projects from TM5::
-
-  base/trunk
-  grids/glb600x400/trunk
-  levels/ml60/trunk
-  user_output/trunk
-  enkf/branches/ctdas
-
-Once done, make a link to the pycasso runscript::
-
-  ln -s  base/trunk/bin/pycasso_setup_tm5 setup_tm5
-
-If needed, create a pycasso-machine-xxxx.rc file for your architecture::
-
-  cd base/trunk/rc
-  cp pycasso-machine-template.rc pycasso-machine-[yourmachinehere].rc
-
-Edit it such that all keys have appropriate values.
-
-Return to the main directory and copy the pycasso-tm5.rc example file::
-
-  cp base/trunk/rc/pycasso-tm5.rc .
-
-Edit this file, specifically the keys::
-
-  my.machine.rc       :  pycasso-machine-[yourmachinehere].rc
-  my.tm5.define       :  slopes with_pycasso without_chemistry without_dry_deposition without_wet_deposition without_sedimentation
-
-Now, try to compile and link the model; read the log file to work your way past possible error messages::
-
-  ./setup_tm5 pycasso-tm5.rc
-
-Once the model has successfully compiled, try to run it, again using the log messages to resolve issues::
-
-  ./setup_tm5 -f -s pycasso-tm5.rc
-
-After a successful completion on one processor, try the code on multiple processors by changing the pycasso-tm5.rc keys::
-
-  par.mpi    :  T
-  par.ntask  :  2
-
-If all works fine, you're ready to try running the TM5 CTDAS project...
-        
-0.2 Installing the TM5 CTDAS code
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Make a copy of the tm5-ctdas.rc example file::
-
-    cp proj/enkf/branches/ctdas/rc/tm5-ctdas.rc .
-
-Edit the tm5-ctdas.rc file to reflect the following keys::
-
-    my.machine.rc       :  pycasso-machine-[yourmachinehere].rc
-    my.tm5.define       :  slopes with_pycasso without_chemistry without_dry_deposition without_wet_deposition without_sedimentation
-
-And pay special attention to the locations of CTDAS input files that are needed (modify the path, and perhaps the filename)::
-
-    ! This is the location of an observations file to read flask data from
-
-    obsdir              : /Volumes/Storage/CO2/carbontracker/input/obsnc
-    output.flask.infile : ${obsdir}/obs_forecast.nc
-
-    ! This is the location of carbontracker a-priori fluxes for ocean, fires, and biosphere. 
-    ! Set the params dir and file to none to simply use prior scaling factors of 1.0
-
-    emis.input.dir          : /Volumes/Storage/CO2/carbontracker/input/ct09
-    ct.params.input.dir     : None
-    ct.params.input.file    : None
-   
-    ! This is the location and name of the fossil fuel input files
-
-    ff_hdf.prefix           : ${emis.input.dir}/co2ff.country_cdiac_bp.seasnameuras.1x1.hdf
-    ff_hdf.varname          : ff_country_new2009b.pro200907271745
- 
-
-If this project compiles successfully and completes a day without major warnings or errors, you are ready to proceed to the next step.
-
-.. warning::
-    If you are planning to run the code on a machine with limited memory, or you are running it on only one processor, you might consider
-    reducing the number of tracers (ensemble members) in the project. To do so, edit the file::
-
-        proj/enkf/branches/ctdas/inv/src/chem_param.F90
-
-This concludes the installation of the TM5 transport model code. Now continue with installing the CTDAS shell code; see :ref:`installing <installing>`.
-
-
diff --git a/da/doc/source/tut_chapter1.rst b/da/doc/source/tut_chapter1.rst
deleted file mode 100644
index bf68063f9220ec85d0548cb8c1a5259b6724fa38..0000000000000000000000000000000000000000
--- a/da/doc/source/tut_chapter1.rst
+++ /dev/null
@@ -1,171 +0,0 @@
-.. _tut_chapter1:
-
-Chapter 1: Configuring CTDAS to run an experiment
-----------------------------------------------------
-
-**In which you will learn to modify the three primary files that together control a CTDAS experiment**
-
-Step 1: Copy the required job files and rc-files
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In order to run CTDAS, you'll need: 
-
-    1. A job script written in bash shell that works on your computer
-    2. A control script written in python that initializes the needed objects
-    3. Three rc-files to initialize different python objects
-
-Luckily, there are pre-made examples available for each of these.
-
-You can grab the example job script in ``da/examples/das.jb`` and copy it to your main directory (the one you used to check out CTDAS). This is an example of its contents: ::
-
-    #$ das.jb 
-    #$ /bin/sh 
-
-     echo "All output piped to file das.out"
-     module load python
-     python das.py rc=da.rc $1  >& das.out
-
-This job script simply prepares your environment and then starts a python executable. Note that the job script can have extra headers that allow you to submit this script to the queue on your computer, if relevant. Configuring this script is part of Step 5 in this chapter.
-
-Next, grab the example python script in ``da/examples/das.py`` and copy it to the same location. Configuring this script is part of Step 3 in this chapter.
-
-Finally, grab the two example rc-files ``da/rc/da.rc`` and ``da/rc/carbontracker.rc`` and also copy them. We will modify these first. The third rc-file needed is actually the ``tm5-ctdas.rc`` file you created in :ref:`Chapter 0 <tut_chapter0>` so that part is done!
-
-.. note:: The 4 files above must be located in the main directory of the CTDAS tree, i.e., ``${yourdir}/da/ct/trunk/``; only the TM5 rc-file can live somewhere else.
-
-Step 2: Modify the two primary rc-files
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Locations and settings that control the inner workings of the CTDAS system are specified in: 
-
-   1. The ``da.rc`` file, which describes your CTDAS configuration with respect to experiment name, time period, and lag
-   2. The ``carbontracker.rc`` file, which describes your CTDAS configuration with respect to observations and statevector
-
-You can open these in any text editor and replace the values of each key with appropriate settings for your experiment. For
-example: ::
-
-    dir.da_run          : ${HOME}/tmp/test_da
-
-can be replaced by: ::
-
-    dir.da_run          : /scratch/${USER}/my_first_ctdas_run
-
-This, as you have likely guessed, will change the location where CTDAS creates a directory structure and places input/output files 
-for your simulation. See :mod:`initexit` for more information on these settings. It is especially important to set the keys ::
-
-    da.system.rc        : carbontracker.rc                   ! the settings needed in your inversion system
-    da.obsoperator.rc   : ${HOME}/Modeling/TM5/tm5-ctdas.rc  ! the rc-file needed to run your observation operator
-
-correctly. The first one refers to the rc-file (2) described above, while the second one refers to the rc-file you used to compile the TM5 model in :ref:`Chapter 0 <tut_chapter0>`. 
-
-.. note:: Files and paths specified in the two basic rc-files must exist, or the system will fail and alert you to the
-         fact that they are missing.
-
-Whereas the ``da.rc`` file is rather self-explanatory, the ``carbontracker.rc`` file has keys that refer to the inner workings of CTDAS, as described in :mod:`dasystem`.
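-
-For reference, a few of the keys you will typically adapt in ``carbontracker.rc`` look like this (the paths shown are site-specific examples, taken from the sample file)::
-
-    datadir         : /Volumes/Storage/CO2/carbontracker/input/ct08/
-    obs.input.dir   : ${datadir}/obsnc/with_fillvalue
-    obs.input.fname : obs_forecast.nc
-    nparameters     : 240
-    obs.sites.rc    : ${datadir}/sites_and_weights_co2.ct10.rc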
-
-.. note:: The example files are found in the da/rc/ directory of your CTDAS tree. You are encouraged 
-         to always create copies of these primary rc-files before modifying them. The rc filenames are specified to the
-         system before running CTDAS and thus you can use a different copy of these 
-         files for different experiments and purposes. You can even create a 
-         sub-directory with the settings of all your experiments if you like.
-
-With the three rc-files now in place and modified, we'll continue with modifying the python control script.
-
-Step 3: Modify the python control script
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Open the das.py script (or whatever you called it) in an editor and take a look at the code. The python control script first initializes 
-some python objects needed to log activity and to parse command line arguments. This is followed by 
-a block where all the modules that are needed in your experiment are imported. The example below shows the import of several classes that 
-are needed to run the CarbonTracker CO2 system on a computer referred to as MaunaLoa, using TM5 as 
-an :ref:`observationoperator <Observation Operator>` ::
-
-    ###########################################################################################
-    ### IMPORT THE APPLICATION SPECIFIC MODULES HERE, TO BE PASSED INTO THE MAIN PIPELINE!!! ##
-    ###########################################################################################
-
-    from da.platform.maunaloa import MaunaloaPlatForm 
-    from da.ct.dasystem import CtDaSystem 
-    from da.ct.statevector import CtStateVector 
-    from da.ct.obs import CtObservations 
-    from da.tm5.observationoperator import TM5ObservationOperator 
-    from da.ct.optimizer import CtOptimizer
-    
-Once the classes are loaded successfully, the objects are created. ::
-
-    PlatForm    = MaunaloaPlatForm()
-    DaSystem    = CtDaSystem(DaCycle['da.system.rc'])
-    ObsOperator = TM5ObservationOperator(DaCycle['da.obsoperator.rc'])
-    Samples     = CtObservations()
-    StateVector = CtStateVector()
-    Optimizer   = CtOptimizer()
-
-.. note:: See how the initialization of the DaSystem and ObservationOperator objects makes use of the keys specified in your primary rc-file! 
-
-Modification of these objects might be desirable for more advanced users, and in case of the :ref:`platform <Platform>` object, even 
-necessary (see next section). Once the objects are created, they are simply passed to a pipeline for the CTDAS. In the first 
-chapter of the tutorial, we will assume this pipeline is immutable.
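-
-For reference, the call that hands the objects to the pipeline in the example ``das.py`` script reads::
-
-    from da.tools.pipeline import EnsembleSmootherPipeline
-
-    EnsembleSmootherPipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector, ObsOperator, Optimizer)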
-
-The only thing you might want to alter for now is the initialization of the PlatForm object, which is computer-specific. How to create your own PlatForm object is described next. After completing this task, make sure you import this object in the ``das.py`` script and initialize it similarly to the example ::
-
-    from da.platform.<yourplatform> import <yourplatform> 
-    PlatForm    = <yourplatform>()
-
-Step 4: Creating a PlatForm object for your system
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-From the description of the :ref:`platform <Platform>` object, you will understand that this object is (partly) unique for each user, 
-or at least for the computing environment of each user. Information on the computing system is therefore coded into a specific python object. 
-
-.. warning:: **This object will need to be created for your system by you.**
-
-Luckily, part of the work is already done. In the ``da/baseclasses`` subdirectory you will find a baseclass :ref:`platform <Platform>` 
-which serves as a blueprint for your own ``PlatForm`` object. This is done through class inheritance. As an example, you can open one 
-of the files ``jet.py`` or ``maunaloa.py`` in the ``da/platform`` directory alongside the original ``da/baseclasses/platform.py``. One
-of the first things to notice is the header of ``class PlatForm`` in the baseclass: ::
-    
-    class PlatForm(object):
-
-and in the derived class::
-
-    from da.baseclasses.platform import PlatForm
-    class PlatForm(PlatForm):
-
-This tells you that the second object actually starts as a copy of the baseclass. But then, we see that the derived class has
-a new implementation of the method ``GetJobTemplate``, the first lines of which are shown below: ::
-
-    def GetJobTemplate(self,joboptions={},block=False):
-        """ 
-            Return the job template for a given computing system, 
-            and fill it with options from the dictionary provided as argument
-        """
-
-        template = """#$ -N jobname \n"""+ \
-                   """#$ -A jobaccount \n"""+ \
-                   """#$ -pe jobnodes \n"""+ \
-                   """#$ -l h_rt=jobtime \n"""+ \
-                   """#$ -S jobshell \n"""+ \
-                   """#$ -o joblog \n"""+ \
-                   """#$ -cwd\n"""+ \
-                   """#$ -r n\n"""+ \
-                   """#$ -V\n"""+ \
-                   """#$ -j y\n"""
-
-While the baseclass did not have any functionality, a call to the ``GetJobTemplate`` method of the derived class will actually return a template for 
-a job script on NOAA's "jet" supercomputer, so that we can submit jobs to its queue. **By modifying each of the methods in your own 
-derived PlatForm class in the same way, you can make each method work on your system**. 
-
-Once you have created your own PlatForm object, and you have successfully imported and instantiated it in your primary python run script, 
-you are ready for the last step.
-
-.. note:: Sometimes it is faster and easier to test your newly created class 'offline'. At the end of your module, following the ``__main__`` 
-         section, you can add lines to test your PlatForm object before plugging it into CTDAS, for example as sketched below. 
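-
-A minimal sketch of such an offline test (the options passed here are placeholders; use keys that your own template understands)::
-
-    if __name__ == "__main__":
-        platform = PlatForm()
-        print platform.GetJobTemplate({'jobname':'test','jobtime':'01:00:00'})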
-
-Step 5: Modifying the job script
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-As a final step, open your job script (1) again and see whether it calls the right python control script, and whether it has the right rc filename specified. If so, you are ready for :ref:`Chapter 2: Running your first experiment <tut_chapter2>`.
-
-
-
-
diff --git a/da/doc/source/tut_chapter2.rst b/da/doc/source/tut_chapter2.rst
deleted file mode 100644
index a7c243774e042b05e7204ca39b653eb209d13678..0000000000000000000000000000000000000000
--- a/da/doc/source/tut_chapter2.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-.. _tut_chapter2:
-
-Chapter 2: Running your first experiment
-----------------------------------------------------
-
-**In which you will run a small sample CTDAS experiment**
-
-Configure a small test run
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Modify the primary da.rc file to: ::  
-
-    time.start   : 2000-01-01 00:00:00
-    time.finish  : 2000-01-03 00:00:00
-    time.cycle   : 1
-    time.nlag    : 3
-
-If these settings do not mean anything to you yet, it is probably good to go back and read again how CTDAS works.
-
-Next, modify the das.py script to: ::
-
-    (a) have the TM5ObservationOperator object point to the correct (compiled and tested) tm5.rc file
-    (b) initialize the correct DaSystem rc-file and object
-    (c) initialize the correct PlatForm object
-
-Refer to :ref:`Chapter 1 <tut_chapter1>` of the tutorial if you need a reminder on how to do this.
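-
-In practice, these three changes boil down to lines of the following form in ``das.py`` (the paths and the platform name are placeholders for your own)::
-
-    from da.platform.<yourplatform> import <yourplatform>
-
-    PlatForm    = <yourplatform>()
-    DaSystem    = CtDaSystem(DaCycle['da.system.rc'])
-    ObsOperator = TM5ObservationOperator(DaCycle['da.obsoperator.rc'])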
-
-
diff --git a/da/doc/source/tut_chapter3.rst b/da/doc/source/tut_chapter3.rst
deleted file mode 100644
index 2523c1aeaa84aac3fe2051d28fce7ad92ff5eead..0000000000000000000000000000000000000000
--- a/da/doc/source/tut_chapter3.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-.. _tut_chapter3:
-
-Chapter 3: Controlling your experiment: Cold start, Restart, or Recover from crash
-------------------------------------------------------------------------------------
-To be completed...
diff --git a/da/doc/source/tut_chapter4.rst b/da/doc/source/tut_chapter4.rst
deleted file mode 100644
index 6850ccc69b08d4ea12e2aed1b31bf30c47489a46..0000000000000000000000000000000000000000
--- a/da/doc/source/tut_chapter4.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-.. _tut_chapter4:
-
-Chapter 4: Adding more observations 
-----------------------------------------------------
-To be completed...
diff --git a/da/doc/source/tut_chapter5.rst b/da/doc/source/tut_chapter5.rst
deleted file mode 100644
index aba8dae6d6a441f026ee9a8a2ef3818b6a827e1a..0000000000000000000000000000000000000000
--- a/da/doc/source/tut_chapter5.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-.. _tut_chapter5:
-
-Chapter 5: Modifying the state vector
-----------------------------------------------------
-To be completed...
diff --git a/da/doc/source/tut_chapter6.rst b/da/doc/source/tut_chapter6.rst
deleted file mode 100644
index 207e95a4cf118d67ddda96f9d9768fea1825fae1..0000000000000000000000000000000000000000
--- a/da/doc/source/tut_chapter6.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-.. _tut_chapter6:
-
-Chapter 6: Changing the covariance structure
-----------------------------------------------------
-To be completed...
diff --git a/da/doc/source/tut_chapter7.rst b/da/doc/source/tut_chapter7.rst
deleted file mode 100644
index 64033bbf756ad64f4dd2de8ca2c2fca9627da576..0000000000000000000000000000000000000000
--- a/da/doc/source/tut_chapter7.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-.. _tut_chapter7:
-
-Chapter 7: Adding a new type of observations 
-----------------------------------------------------
-To be completed...
diff --git a/da/doc/source/tutorial.rst b/da/doc/source/tutorial.rst
deleted file mode 100644
index d2d1fbe2a98cb6e33b72a5d965d0e6901379cc3a..0000000000000000000000000000000000000000
--- a/da/doc/source/tutorial.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-.. _tutorial:
-
-Tutorial
-======================
-
-The CTDAS tutorial is written to provide some guidance for common tasks when running, extending, or modifying CTDAS for your own purposes. 
-
-.. warning:: It is not a basic course in data assimilation techniques, nor in the use of python, object-oriented programming, or UNIX. 
-
-The descriptions assume that you are familiar with these. They also assume that you have successfully compiled and 
-run a transport model (usually TM5) that can serve as observation operator for your CTDAS runs. Instructions 
-on how to obtain, compile, or write such a project are not included here. We refer to the :ref:`installation <installing>` section for 
-further help with those steps. 
-
-.. toctree::
-   :maxdepth: 1
-
-   Chapter 0: Configuring TM5 to run as transport model <tut_chapter0>
-   Chapter 1: Configuring CTDAS to run an experiment <tut_chapter1>
-   Chapter 2: Running your first experiment <tut_chapter2>
-   Chapter 3: Controlling your experiment: Cold start, Restart, or Recover from crash <tut_chapter3>
-   Chapter 4: Adding more observations <tut_chapter4>
-   Chapter 5: Modifying the state vector <tut_chapter5>
-   Chapter 6: Changing the covariance structure <tut_chapter6>
-   Chapter 7: Adding a new type of observations <tut_chapter7>
-
-
-
diff --git a/da/examples/das.jb b/da/examples/das.jb
deleted file mode 100755
index 541c08aa51c7320b6e65d9e3ed389eb6ef7b7b48..0000000000000000000000000000000000000000
--- a/da/examples/das.jb
+++ /dev/null
@@ -1,9 +0,0 @@
-#$ das.py 
-#$ co2 
-#$ nserial 1 
-#$ 01:30:00 
-#$ /bin/sh 
-
- echo "All output piped to file das.out"
- module load python
- python das.py rc=da.rc $1  >& das.out
diff --git a/da/examples/das.py b/da/examples/das.py
deleted file mode 100755
index 6e61874a431d3a1ad1ecc8903832cd929708660d..0000000000000000000000000000000000000000
--- a/da/examples/das.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-
-#################################################################################################
-# First order of business is always to make all other python modules accessible through the path
-#################################################################################################
-
-import sys
-import os
-import logging
-
-sys.path.append(os.getcwd())
-
-#################################################################################################
-# Next, import the tools needed to initialize a data assimilation cycle
-#################################################################################################
-
-from da.tools.initexit import StartLogger
-from da.tools.initexit import ValidateOptsArgs
-from da.tools.initexit import ParseOptions
-
-#################################################################################################
-# Parse and validate the command line options, start logging
-#################################################################################################
-
-StartLogger()
-opts, args = ParseOptions()
-opts, args = ValidateOptsArgs(opts, args)
-
-#################################################################################################
-# Create the Cycle Control object for this job    
-#################################################################################################
-
-from da.tools.initexit import CycleControl
-
-DaCycle = CycleControl(opts, args)
-
-###########################################################################################
-### IMPORT THE APPLICATION SPECIFIC MODULES HERE, TO BE PASSED INTO THE MAIN PIPELINE!!! ##
-###########################################################################################
-
-from da.tools.pipeline import EnsembleSmootherPipeline
-from da.platform.maunaloa import MaunaloaPlatForm 
-from da.ct.dasystem import CtDaSystem 
-from da.ct.statevector import CtStateVector 
-from da.ct.obs import CtObservations 
-from da.tm5.observationoperator import TM5ObservationOperator 
-from da.ct.optimizer import CtOptimizer
-
-PlatForm = MaunaloaPlatForm()
-DaSystem = CtDaSystem(DaCycle['da.system.rc'])
-ObsOperator = TM5ObservationOperator(DaCycle['da.obsoperator.rc'])
-Samples = CtObservations()
-StateVector = CtStateVector()
-Optimizer = CtOptimizer()
-
-##########################################################################################
-################### ENTER THE PIPELINE WITH THE OBJECTS PASSED BY THE USER ###############
-##########################################################################################
-
-from da.tools.pipeline import header, footer
-
-logging.info(header + "Entering Pipeline " + footer)
-
-EnsembleSmootherPipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector, ObsOperator, Optimizer)
-
-
-##########################################################################################
-################### All done, extra stuff can be added next, such as analysis
-##########################################################################################
-
-logging.info(header + "Starting analysis" + footer)
-
-
-from da.analysis.expand_fluxes import SaveWeeklyAvg1x1Data
-from da.analysis.expand_fluxes import SaveWeeklyAvgStateData
-from da.analysis.expand_fluxes import SaveWeeklyAvgTCData
-
-SaveWeeklyAvg1x1Data(DaCycle, StateVector)
-SaveWeeklyAvgStateData(DaCycle, StateVector)
-SaveWeeklyAvgTCData(DaCycle, StateVector)
-#LU is that thing necessary?
-sys.exit(0)
-
-
diff --git a/da/examples/dasgridded.jb b/da/examples/dasgridded.jb
deleted file mode 100755
index 81c6d5f9f39f72058e9be23633d08740522bcc4e..0000000000000000000000000000000000000000
--- a/da/examples/dasgridded.jb
+++ /dev/null
@@ -1,9 +0,0 @@
-#$ dasgridded.jb 
-#$ co2 
-#$ nserial 1 
-#$ 01:30:00 
-#$ /bin/sh 
-
- echo "All output piped to file dasgridded.out"
- module load python
- python dasgridded.py rc=dagridded.rc $1  >& dasgridded.out
diff --git a/da/examples/dasgridded.py b/da/examples/dasgridded.py
deleted file mode 100755
index 5649b45c4025ad9688b2a1b489d5a39ec66d8332..0000000000000000000000000000000000000000
--- a/da/examples/dasgridded.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-
-#################################################################################################
-# First order of business is always to make all other python modules accessible through the path
-#################################################################################################
-
-import sys
-import os
-import logging
-dummy = sys.path.append(os.getcwd())
-
-#################################################################################################
-# Next, import the tools needed to initialize a data assimilation cycle
-#################################################################################################
-
-from da.tools.initexit import StartLogger
-from da.tools.initexit import ValidateOptsArgs
-from da.tools.initexit import ParseOptions
-
-#################################################################################################
-# Parse and validate the command line options, start logging
-#################################################################################################
-
-dummy       = StartLogger()
-opts, args  = ParseOptions()
-opts,args   = ValidateOptsArgs(opts,args)
-
-#################################################################################################
-# Create the Cycle Control object for this job    
-#################################################################################################
-
-from da.tools.initexit import CycleControl
-
-DaCycle     = CycleControl(opts,args)
-
-###########################################################################################
-### IMPORT THE APPLICATION SPECIFIC MODULES HERE, TO BE PASSED INTO THE MAIN PIPELINE!!! ##
-###########################################################################################
-
-from da.tools.pipeline import EnsembleSmootherPipeline
-from da.platform.maunaloa import MaunaloaPlatForm 
-#from da.ct.dasystem import CtDaSystem 
-from da.ctgridded.dasystem import CtGriddedDaSystem 
-#from da.ct.statevector import CtStateVector 
-from da.ctgridded.statevector import CtGriddedStateVector 
-from da.ct.obs import CtObservations 
-from da.tm5.observationoperator import TM5ObservationOperator 
-from da.ct.optimizer import CtOptimizer
-
-PlatForm    = MaunaloaPlatForm()
-DaSystem    = CtGriddedDaSystem(DaCycle['da.system.rc'])
-ObsOperator = TM5ObservationOperator(DaCycle['da.obsoperator.rc'])
-Samples     = CtObservations()
-StateVector = CtGriddedStateVector()
-Optimizer   = CtOptimizer()
-
-##########################################################################################
-################### ENTER THE PIPELINE WITH THE OBJECTS PASSED BY THE USER ###############
-##########################################################################################
-
-from da.tools.pipeline import header,footer
-
-msg          = header+"Entering Pipeline "+footer      ; logging.info(msg) 
-
-EnsembleSmootherPipeline(DaCycle,PlatForm, DaSystem, Samples,StateVector,ObsOperator,Optimizer)
-
-
-##########################################################################################
-################### All done, extra stuff can be added next, such as analysis
-##########################################################################################
-
-msg          = header+"Starting analysis"+footer      ; logging.info(msg) 
-
-from da.analysis.expand_fluxes import SaveWeeklyAvg1x1Data
-from da.analysis.expand_fluxes import SaveWeeklyAvgStateData
-from da.analysis.expand_fluxes import SaveWeeklyAvgTCData
-
-savedas      = SaveWeeklyAvg1x1Data(DaCycle, StateVector)
-savedas      = SaveWeeklyAvgStateData(DaCycle, StateVector)
-savedas      = SaveWeeklyAvgTCData(DaCycle, StateVector)
-
-sys.exit(0)
-
-
diff --git a/da/examples/dasjet.jb b/da/examples/dasjet.jb
deleted file mode 100644
index 6dd86320a93ea57eecd0596a406bbadb1f8e3156..0000000000000000000000000000000000000000
--- a/da/examples/dasjet.jb
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env sh
-# dasjet.sh
-
-#$ -A co2 
-#$ -pe nserial 1 
-#$ -l h_rt=03:00:00 
-#$ -S /bin/sh 
-#$ -cwd
-#$ -j y
-#$ -r n
-#$ -V
-
-echo "All output piped to file das.out"
-python dasjet.py rc=dajet.rc $1  >& das.out
diff --git a/da/examples/dasjet.py b/da/examples/dasjet.py
deleted file mode 100755
index e991aa6fd073c9ffe8c227c330d6a2e0f42ac580..0000000000000000000000000000000000000000
--- a/da/examples/dasjet.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env python
-
-#################################################################################################
-# First order of business is always to make all other python modules accessible through the path
-#################################################################################################
-
-import sys
-import os
-import logging
-dummy = sys.path.append(os.getcwd())
-
-#################################################################################################
-# Next, import the tools needed to initialize a data assimilation cycle
-#################################################################################################
-
-from da.tools.initexit import StartLogger
-from da.tools.initexit import ValidateOptsArgs
-from da.tools.initexit import ParseOptions
-
-#################################################################################################
-# Parse and validate the command line options, start logging
-#################################################################################################
-
-dummy       = StartLogger()
-opts, args  = ParseOptions()
-opts,args   = ValidateOptsArgs(opts,args)
-
-#################################################################################################
-# Create the Cycle Control object for this job    
-#################################################################################################
-
-from da.tools.initexit import CycleControl
-
-DaCycle     = CycleControl(opts,args)
-
-###########################################################################################
-### IMPORT THE APPLICATION SPECIFIC MODULES HERE, TO BE PASSED INTO THE MAIN PIPELINE!!! ##
-###########################################################################################
-
-from da.tools.pipeline import EnsembleSmootherPipeline
-from da.platform.jet import JetPlatForm 
-from da.ct.dasystem import CtDaSystem 
-from da.ct.statevector import CtStateVector 
-from da.ct.obs import CtObservations 
-from da.tm5.observationoperator import TM5ObservationOperator 
-from da.ct.optimizer import CtOptimizer
-
-PlatForm    = JetPlatForm()
-DaSystem    = CtDaSystem(DaCycle['da.system.rc'])
-ObsOperator = TM5ObservationOperator(DaCycle['da.obsoperator.rc'])
-Samples     = CtObservations()
-StateVector = CtStateVector()
-Optimizer   = CtOptimizer()
-
-##########################################################################################
-################### ENTER THE PIPELINE WITH THE OBJECTS PASSED BY THE USER ###############
-##########################################################################################
-
-from da.tools.pipeline import header,footer
-
-msg          = header+"Entering Pipeline "+footer      ; logging.info(msg) 
-
-EnsembleSmootherPipeline(DaCycle,PlatForm, DaSystem, Samples,StateVector,ObsOperator,Optimizer)
-
-
-##########################################################################################
-################### All done
-##########################################################################################
-
diff --git a/da/platform/__init__.py b/da/platform/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/da/platform/jet.py b/da/platform/jet.py
deleted file mode 100755
index 53ae881eccca8ce3555305b762137cb5d74a35a5..0000000000000000000000000000000000000000
--- a/da/platform/jet.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env python
-# jet.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 06 Sep 2010.
-
-"""
-
-import sys
-import os
-import logging
-import subprocess
-
-from da.baseclasses.platform import PlatForm
-
-std_joboptions={'jobname':'test','jobaccount':'co2','jobnodes':'nserial 1','jobshell':'/bin/sh','depends':'','jobtime':'00:30:00','joblog':os.getcwd()}
-
-class JetPlatForm(PlatForm):
-    def __init__(self):
-        self.Identifier     = 'NOAA jet'    # the identifier gives the platform name
-        self.Version        = '1.0'     # the platform version used
-
-        msg1  = '%s platform object initialized'%self.Identifier ; logging.debug(msg1)
-        msg2  = '%s version: %s'%(self.Identifier,self.Version) ; logging.debug(msg2)
-
-
-    def GetJobTemplate(self,joboptions={},block=False):
-        """ Return the job template for a given computing system, and fill it with options from the dictionary provided as argument"""
-
-        template = """#$ -N jobname \n"""+ \
-                   """#$ -A jobaccount \n"""+ \
-                   """#$ -pe jobnodes \n"""+ \
-                   """#$ -l h_rt=jobtime \n"""+ \
-                   """#$ -S jobshell \n"""+ \
-                   """#$ -o joblog \n"""+ \
-                   """#$ -cwd\n"""+ \
-                   """#$ -r n\n"""+ \
-                   """#$ -V\n"""+ \
-                   """#$ -j y\n"""
-
-        if 'depends' in joboptions:
-            template += """#$ -hold_jid depends \n"""
-
-        if block:
-            template += """#$ -sync y\n"""
-
-        # First replace from passed dictionary
-        for k,v in joboptions.iteritems():
-            while k in template:
-                template = template.replace(k,v)
-
-        # Fill remaining values with std_options
-        for k,v in std_joboptions.iteritems():
-            while k in template:
-                template = template.replace(k,v)
-
-        return template
-
-    def GetMyID(self):
-        try:
-            return os.environ['JOB_ID']
-        except:
-            return os.getpid()
-
-    def SubmitJob(self,jobfile,joblog=None,block=False): 
-        """ This method submits a jobfile to the queue, and returns the queue ID """
-
-               
-        cmd     = ["qsub",jobfile]
-        msg = "A new task will be started (%s)"%cmd  ; logging.info(msg)
-        output  = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]  ; logging.info(output)
-        jobid   = output.split()[2]
-        retcode = output.split()[-1]
-
-        return retcode
-
-    def KillJob(self,jobid):                   
-        """ This method kills a running job """
-        
-        output = subprocess.Popen(['qdel',jobid], stdout=subprocess.PIPE).communicate()[0]  ; logging.info(output)
-
-        return output
-
-    def StatJob(self,jobid):                   
-        """ This method gets the status of a running job """
-        import subprocess
-        
-        #output = subprocess.Popen(['sgestat'], stdout=subprocess.PIPE).communicate()[0]  ; logging.info(output)
-
-        return ''
-
-if __name__ == "__main__":
-    pass
diff --git a/da/platform/maunaloa.py b/da/platform/maunaloa.py
deleted file mode 100755
index 13845aa7df63e0b22042565da4c72e0d2650f5d9..0000000000000000000000000000000000000000
--- a/da/platform/maunaloa.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-# maunaloa.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 06 Sep 2010.
-
-"""
-
-import sys
-import os
-import logging
-import subprocess
-
-from da.baseclasses.platform import PlatForm, std_joboptions
-
-class MaunaloaPlatForm(PlatForm):
-    def __init__(self):
-
-        self.Identifier     = 'WU maunaloa'    # the identifier gives the platform name
-        self.Version        = '1.0'     # the platform version used
-
-        msg1  = 'Platform initialized: %s'%self.Identifier      ; logging.info(msg1)
-        #msg2  = '%s version: %s'%(self.Identifier,self.Version) ; logging.info(msg2)
-
-    def GetJobTemplate(self,joboptions={},block=False):
-        """ 
-        Returns the job template for a given computing system, and fills it with options from the dictionary provided as argument.
-        The job template should return the preamble of a job that can be submitted to a queue on your platform, 
-        examples of popular queuing systems are:
-            - SGE
-            - MOAB
-            - XGrid
-            -
-
-        A list of job options can be passed through a dictionary, which are then filled in on the proper line,
-        an example is for instance passing the dictionary {'account':'co2'} which will be placed 
-        after the ``-A`` flag in a ``qsub`` environment.
-
-        An extra option ``block`` has been added that allows the job template to be configured to block the current
-        job until the submitted job in this template has been completed fully.
-        """
-
-        template = """## \n"""+ \
-                   """## This is a set of dummy names, to be replaced by values from the dictionary \n"""+ \
-                   """## Please make your own platform specific template with your own keys and place it in a subfolder of the da package.\n """+ \
-                   """## \n"""+ \
-                   """ \n"""+ \
-                   """#$ jobname \n"""+ \
-                   """#$ jobaccount \n"""+ \
-                   """#$ jobnodes \n"""+ \
-                   """#$ jobtime \n"""+ \
-                   """#$ jobshell \n"""+ \
-                   """\n"""+ \
-                   """source /usr/local/Modules/3.2.8/init/sh\n"""+ \
-                   """module load python\n"""+ \
-                   """\n"""
-
-        if 'depends' in joboptions:
-            template += """#$ -hold_jid depends \n"""
-
-        # First replace from passed dictionary
-        for k,v in joboptions.iteritems():
-            while k in template:
-                template = template.replace(k,v)
-
-        # Fill remaining values with std_options
-        for k,v in std_joboptions.iteritems():
-            while k in template:
-                template = template.replace(k,v)
-
-        return template
-
-
-if __name__ == "__main__":
-    pass
diff --git a/da/rc/NamingScheme.wp_Mar2011.rc b/da/rc/NamingScheme.wp_Mar2011.rc
deleted file mode 100644
index 094cb6c5a0ac313b79933f38a15bd9e75414ec98..0000000000000000000000000000000000000000
--- a/da/rc/NamingScheme.wp_Mar2011.rc
+++ /dev/null
@@ -1,73 +0,0 @@
-! output naming scheme for CarbonTracker savestate files, version May 07 by Wouter Peters
-
-! assimilated quantities, observation details
-
-assimilated.co2_mixingratio.observed    : co2_obs_fcast
-assimilated.latitude.observed           : lat_obs_fcast
-assimilated.longitude.observed          : lon_obs_fcast
-assimilated.height.observed             : height_obs_fcast
-assimilated.code.observed               : stationnames_obs_fcast
-assimilated.time.observed               : itau_obs_fcast
-assimilated.eventnumber.observed        : eventnumber_obs_fcast
-
-! assimilated quantities, simulation details
-
-assimilated.co2_mixingratio.simulated   : co2_sim_fcast
-assimilated.flag.simulated              : flag_sim_fcast
-assimilated.hphr.simulated              : hqhr_sim_fcast
-assimilated.modeldatamismatch.simulated : error_sim_fcast
-assimilated.co2_mixingratio.ensemble.simulated   : dF
-
-! analysis quantities, sampled after each optimization and thus not necessarily final
-
-analyzed.co2_mixingratio.simulated : co2_sim_ana
-
-! same for quantities sampled from optimized/final results
-
-final.co2_mixingratio.observed    : co2_obs_final
-final.latitude.observed           : lat_obs_final
-final.longitude.observed          : lon_obs_final
-final.height.observed             : height_obs_final
-final.code.observed               : stationnames_obs_final
-final.time.observed               : itau_obs_final
-final.eventnumber.observed        : eventnumber_obs_final
-
-! final optimized quantities, simulation details
-
-final.co2_mixingratio.simulated         : co2_sim_final
-final.co2_bg_mixingratio.simulated      : co2_bg_sim_final
-final.co2_fossil_mixingratio.simulated  : co2_ff_sim_final
-final.co2_fires_mixingratio.simulated   : co2_fires_sim_final
-final.co2_bio_mixingratio.simulated     : co2_bio_sim_final
-final.co2_ocean_mixingratio.simulated   : co2_ocean_sim_final
-final.co2_mixingratio.ensemble.simulated : dF_f
-
-! background fluxes
-
-background.co2.fossil.flux  : flux_ff_prior_mean
-background.co2.fires.flux   : flux_fires_prior_mean
-background.co2.bio.flux     : flux_bio_prior_mean
-background.co2.ocean.flux   : flux_ocean_prior_mean
-background.co2.res.flux     : flux_res_prior_mean
-background.co2.gpp.flux     : flux_gpp_prior_mean
-
-! optimized fluxes
-
-final.co2.fossil.flux  : flux_ff_post_mean
-final.co2.fires.flux   : flux_fires_post_mean
-final.co2.bio.flux     : flux_bio_post_mean
-final.co2.ocean.flux   : flux_ocean_post_mean
-final.co2.res.flux     : flux_res_post_mean
-final.co2.gpp.flux     : flux_gpp_post_mean
-
-! background parameters
-
-background.param.mean       : xpc
-background.param.ensemble   : pdX
-
-! optimized parameters
-
-final.param.mean       : xac
-final.param.ensemble   : adX
-final.param.mean.1x1   : flux_multiplier_m
-
diff --git a/da/rc/carbontracker.rc b/da/rc/carbontracker.rc
deleted file mode 100644
index c9364ce32c4a443d9086ed41d8b3c1e0195f70c9..0000000000000000000000000000000000000000
--- a/da/rc/carbontracker.rc
+++ /dev/null
@@ -1,22 +0,0 @@
-!!! Info for the CarbonTracker data assimilation system
-
-datadir         : /Volumes/Storage/CO2/carbontracker/input/ct08/
-obs.input.dir   : ${datadir}/obsnc/with_fillvalue
-obs.input.fname : obs_forecast.nc
-ocn.covariance  : ${datadir}/oif_p3_era40.dpco2.2000.01.hdf 
-bio.covariance  : ${datadir}/covariance_bio_olson19.nc
-deltaco2.prefix : oif_p3_era40.dpco2
-regtype         : olson19_oif30
-nparameters     : 240
-random.seed     : 4385
-regions.dir     : /Users/peters/Library/Code/carbontracker/shared/aux
-regionsfile     : ${regions.dir}/regions.nc
-
-! Include a naming scheme for the variables
-
-#include NamingScheme.wp_Mar2011.rc 
-
-! Info on the sites file used
-
-obs.sites.rc        : ${datadir}/sites_and_weights_co2.ct10.rc
-
diff --git a/da/rc/carbontrackergridded.rc b/da/rc/carbontrackergridded.rc
deleted file mode 100644
index 27296828283dd5b7b2b43b7c12e09ffa7a6ee888..0000000000000000000000000000000000000000
--- a/da/rc/carbontrackergridded.rc
+++ /dev/null
@@ -1,25 +0,0 @@
-!!! Info for the CarbonTracker data assimilation system
-
-datadir         : /Volumes/Storage/CO2/carbontracker/input/ct08/
-obs.input.dir   : ${datadir}/obsnc/with_fillvalue
-obs.input.fname : obs_forecast.nc
-
-ocn.covariance  : ${datadir}/oif_p3_era40.dpco2.2000.01.hdf 
-deltaco2.prefix : oif_p3_era40.dpco2
-
-bio.cov.dir     : /Users/peters/python/CTDAS
-bio.cov.prefix  : cov_ecoregion_
-
-regtype         : gridded_oif30
-nparameters     : 9738
-random.seed     : 4385
-regionsfile     : ${bio.cov.dir}/griddedparameters.nc
-
-! Include a naming scheme for the variables
-
-#include NamingScheme.wp_Mar2011.rc 
-
-! Info on the sites file used
-
-obs.sites.rc        : ${datadir}/sites_and_weights_co2.ct10.rc
-
diff --git a/da/rc/carbontrackerjet.rc b/da/rc/carbontrackerjet.rc
deleted file mode 100644
index c7f99401897c51edaaedea4f6ef3397c3a128a72..0000000000000000000000000000000000000000
--- a/da/rc/carbontrackerjet.rc
+++ /dev/null
@@ -1,17 +0,0 @@
-!!! Info for the CarbonTracker data assimilation system
-
-datadir         : /lfs0/projects/co2/input/ct_new_2010
-obs.input.dir   : ${datadir}/obsnc/with_fillvalue
-obs.input.fname : obs_forecast.nc
-ocn.covariance  : ${datadir}/oif_p3_era40.dpco2.2000.01.hdf 
-bio.covariance  : ${datadir}/covariance_bio_olson19.nc
-deltaco2.prefix : oif_p3_era40.dpco2
-regtype         : olson19_oif30
-nparameters     : 240
-random.seed     : 4385
-regionsfile     : transcom_olson19_oif30.hdf
-
-! Info on the sites file used
-
-obs.sites.rc        : ${datadir}/sites_and_weights_co2.ct10.rc
-
diff --git a/da/rc/da.rc b/da/rc/da.rc
deleted file mode 100644
index 9f0493f168ef68157378fb5ad838d80bc5d41b06..0000000000000000000000000000000000000000
--- a/da/rc/da.rc
+++ /dev/null
@@ -1,20 +0,0 @@
-! Info on the data assimilation cycle
-
-time.restart        : False
-time.start          : 2000-01-01 00:00:00
-time.finish         : 2008-01-01 00:00:00
-time.cycle          : 1
-time.nlag           : 3
-dir.da_run          : /Storage/CO2/peters/test_da/
-
-
-! Info on the DA system used
-
-da.system           : CarbonTracker
-da.system.rc        : da/rc/carbontracker.rc
-
-! Info on the forward model to be used
-
-da.obsoperator         : TM5
-da.obsoperator.rc      : ${HOME}/Modeling/TM5/tm5-ctdas.rc
-da.optimizer.nmembers  : 16
diff --git a/da/rc/dagridded.rc b/da/rc/dagridded.rc
deleted file mode 100644
index c14ace782acc80128d0a46fa3330f9db59c0fad5..0000000000000000000000000000000000000000
--- a/da/rc/dagridded.rc
+++ /dev/null
@@ -1,20 +0,0 @@
-! Info on the data assimilation cycle
-
-time.restart        : False
-time.start          : 2000-01-01 00:00:00
-time.finish         : 2008-01-01 00:00:00
-time.cycle          : 1
-time.nlag           : 3
-dir.da_run          : /Storage/CO2/peters/test_dagridded/
-
-
-! Info on the DA system used
-
-da.system           : CarbonTracker
-da.system.rc        : da/rc/carbontrackergridded.rc
-
-! Info on the forward model to be used
-
-da.obsoperator         : TM5
-da.obsoperator.rc      : ${HOME}/Modeling/TM5/tm5-ctdas.rc
-da.optimizer.nmembers  : 16
diff --git a/da/rc/dajet.rc b/da/rc/dajet.rc
deleted file mode 100644
index ac28d71ab11ef19efb1fe22ecdebafdb8f2587cd..0000000000000000000000000000000000000000
--- a/da/rc/dajet.rc
+++ /dev/null
@@ -1,19 +0,0 @@
-! Info on the data assimilation cycle
-
-time.restart        : False
-time.start          : 2000-01-01 00:00:00
-time.finish         : 2008-01-01 00:00:00
-time.cycle          : 1
-time.nlag           : 3
-dir.da_run          : /lfs0/projects/co2/peters/test_da
-
-! Info on the DA system used
-
-da.system           : CarbonTracker
-da.system.rc        : da/rc/carbontrackerjet.rc
-
-! Info on the forward model to be used
-
-da.obsoperator         : TM5
-da.obsoperator.rc      : ${HOME}/TM/TM5_new/tm5-ctdas.rc
-da.optimizer.nmembers  : 16
diff --git a/da/test/test_optimizer.py b/da/test/test_optimizer.py
deleted file mode 100755
index e86a7d6eddfbcd55367ce4b0bd2b540848e52c87..0000000000000000000000000000000000000000
--- a/da/test/test_optimizer.py
+++ /dev/null
@@ -1,243 +0,0 @@
-#!/usr/bin/env python
-# test_optimizer.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 04 Aug 2010.
-
-"""
-
-def SerialPyAgainstSerialFortran():
-    """ Test the solution of the serial algorithm against the CT cy2 fortran generated one """
-
-    # get data from the savestate.hdf file from the first cycle of CarbonTracker 2009 release
-
-    print "WARNING: The optimization algorithm has changed from the CT2009 release because of a bug"
-    print "WARNING: in the fortran code. Hence, the two solutions calculated are no longer the same."
-    print "WARNING: To change the python algorithm so that it corresponds to the fortran, change the"
-    print "WARNING: loop from m=n+1,nfcast to m=1,nfcast"
-
-    savefile = '/data/CO2/peters/carbontracker/raw/ct09rc0i/20000108/savestate.hdf'
-    print savefile
-
-    f = Nio.open_file(savefile,'r')
-    obs = f.variables['co2_obs_fcast'].get_value()
-    sel_obs = obs.shape[0]
-
-    dims      = ( int(DaCycle.da_settings['time.nlag']),
-                  int(DaCycle.da_settings['forecast.nmembers']),
-                  int(DaCycle.DaSystem.da_settings['nparameters']),
-                  sel_obs,  )
-
-    nlag,nmembers,nparams, nobs = dims
-
-    optserial = CtOptimizer(dims)
-    opt = optserial
-
-    opt.SetLocalization('CT2007')
-
-    obs = f.variables['co2_obs_fcast'].get_value()[0:nobs]
-    opt.obs = obs
-    sim = f.variables['co2_sim_fcast'].get_value()[0:nobs]
-    opt.Hx = sim
-    error = f.variables['error_sim_fcast'].get_value()[0:nobs]
-    flags = f.variables['flag_sim_fcast'].get_value()[0:nobs]
-    opt.flags = flags
-    simana = f.variables['co2_sim_ana'].get_value()[0:nobs]
-
-    for n in range(nobs): opt.R[n,n]   = np.double(error[n]**2)
-
-    xac=[]
-    adX=[]
-    for lag in range(nlag):
-        xpc = f.variables['xpc_%02d'%(lag+1)].get_value()
-        opt.x[lag*nparams:(lag+1)*nparams] = xpc
-        X = f.variables['pdX_%02d'%(lag+1)].get_value()
-        opt.X_prime[lag*nparams:(lag+1)*nparams,:] = np.transpose(X)
-        HX = f.variables['dF'][:,0:sel_obs]
-        opt.HX_prime[:,:] = np.transpose(HX)
-
-        # Also create arrays of the analysis of the fortran code for later comparison
-
-        xac.extend ( f.variables['xac_%02d'%(lag+1)].get_value())
-        adX.append (f.variables['adX_%02d'%(lag+1)].get_value() )
-
-    xac=np.array(xac)
-    X_prime=np.array(adX).swapaxes(1,2).reshape((opt.nparams*opt.nlag,opt.nmembers))
-
-    opt.SerialMinimumLeastSquares()
-
-    print "Maximum differences and correlation of 2 state vectors:"
-    print np.abs(xac-opt.x).max(),np.corrcoef(xac,opt.x)[0,1]
-       
-    plt.figure(1)
-    plt.plot(opt.x,label='SerialPy')
-    plt.plot(xac,label='SerialFortran')
-    plt.grid(True)
-    plt.legend(loc=0)
-    plt.title('Analysis of state vector')
-
-    print "Maximum differences of 2 state vector deviations:"
-    print np.abs(X_prime-opt.X_prime).max()
-
-    plt.figure(2)
-    plt.plot(opt.X_prime.flatten(),label='SerialPy')
-    plt.plot(X_prime.flatten(),label='SerialFortran')
-    plt.grid(True)
-    plt.legend(loc=0)
-    plt.title('Analysis of state vector deviations')
-
-    print "Maximum differences and correlation of 2 simulated obs vectors:"
-    print np.abs(simana-opt.Hx).max(),np.corrcoef(simana,opt.Hx)[0,1]
-
-    plt.figure(3)
-    plt.plot(opt.Hx,label='SerialPy')
-    plt.plot(simana,label='SerialFortran')
-    plt.grid(True)
-    plt.legend(loc=0)
-    plt.title('Analysis of CO2 mixing ratios')
-    plt.show()
-
-    f.close()
-
-def SerialvsBulk():
-    """ A test of the two algorithms currently implemented: serial vs bulk solution """    
-
-    # get data from the savestate.hdf file from the first cycle of CarbonTracker 2009 release
-
-    savefile = '/data/CO2/peters/carbontracker/raw/ct09rc0i/20000108/savestate.hdf'
-    print savefile
-
-    f = Nio.open_file(savefile,'r')
-    obs = f.variables['co2_obs_fcast'].get_value()
-
-    nobs = 77
-
-    dims      = ( int(DaCycle.da_settings['time.nlag']),
-                  int(DaCycle.da_settings['forecast.nmembers']),
-                  int(DaCycle.DaSystem.da_settings['nparameters']),
-                  nobs,  )
-
-    nlag,nmembers,nparams, nobs = dims
-
-    optbulk   = CtOptimizer(dims)
-    optserial = CtOptimizer(dims)
-
-    for o,opt in enumerate([optbulk,optserial]):
-
-        opt.SetLocalization('CT2007')
-
-        obs = f.variables['co2_obs_fcast'].get_value()[0:nobs]
-        opt.obs = obs
-        sim = f.variables['co2_sim_fcast'].get_value()[0:nobs]
-        opt.Hx = sim
-        error = f.variables['error_sim_fcast'].get_value()[0:nobs]
-        flags = f.variables['flag_sim_fcast'].get_value()[0:nobs]
-        opt.flags = flags
-
-        for n in range(nobs): 
-            opt.R[n,n]   = np.double(error[n]**2)
-
-        xac=[]
-        for lag in range(nlag):
-            xpc = f.variables['xpc_%02d'%(lag+1)].get_value()
-            opt.x[lag*nparams:(lag+1)*nparams] = xpc
-            X = f.variables['pdX_%02d'%(lag+1)].get_value()
-            opt.X_prime[lag*nparams:(lag+1)*nparams,:] = np.transpose(X)
-            HX = f.variables['dF'][:,0:nobs]
-            opt.HX_prime[:,:] = np.transpose(HX)
-
-        if o == 0:
-            opt.BulkMinimumLeastSquares()
-            x1=opt.x
-            xp1=opt.X_prime
-            hx1=opt.Hx
-            hxp1=opt.HX_prime
-            hphr1=opt.HPHR
-            k1=opt.KG
-        if o == 1:
-            opt.SerialMinimumLeastSquares()
-            x2=opt.x
-            xp2=opt.X_prime
-            hx2=opt.Hx
-            hxp2=opt.HX_prime
-            hphr2=opt.HPHR
-            k2=opt.KG
-           
-    plt.figure()
-
-    print "Maximum differences and correlation of 2 state vectors:"
-    print np.abs(x2-x1).max(),np.corrcoef(x2,x1)[0,1]
-       
-    plt.figure(1)
-    plt.plot(x1,label='Bulk')
-    plt.plot(x2,label='Serial')
-    plt.grid(True)
-    plt.legend(loc=0)
-    plt.title('Analysis of state vector')
-
-    print "Maximum differences of 2 state vector deviations:"
-    print np.abs(xp2-xp1).max()
-
-    plt.figure(2)
-    plt.plot(xp1.flatten(),label='Bulk')
-    plt.plot(xp2.flatten(),label='Serial')
-    plt.grid(True)
-    plt.legend(loc=0)
-    plt.title('Analysis of state vector deviations')
-
-    print "Maximum differences and correlation of 2 simulated obs vectors:"
-    print np.abs(hx2-hx1).max(),np.corrcoef(hx2,hx1)[0,1]
-
-    plt.figure(3)
-    plt.plot(hx1,label='Bulk')
-    plt.plot(hx2,label='Serial')
-    plt.title('Analysis of CO2 mixing ratios')
-    plt.grid(True)
-    plt.legend(loc=0)
-
-    plt.show()
-
-    f.close()
-
-
-
-if __name__ == "__main__":
-
-    import sys
-
-    sys.path.append('../../')
-
-    import os
-    from da.tools.general import StartLogger 
-    from da.tools.initexit import CycleControl 
-    from da.ct.statevector import CtStateVector, CtMember, PrepareState
-    from da.ct.obs import CtObservations, MixingRatioSample
-    from da.ct.optimizer import CtOptimizer
-    import numpy as np
-    import da.tools.rc as rc
-    import datetime
-    import Nio
-    import matplotlib.pyplot as plt
-
-    opts = ['-v']
-    args = {'rc':'da.rc','logfile':'da_initexit.log','jobrcfilename':'test.rc'}
-
-    StartLogger()
-    DaCycle = CycleControl(opts,args)
-
-    dummy = CtMember(0)
-    DaCycle.Initialize()
-    StateVector = CtStateVector(DaCycle)
-    samples = CtObservations(DaCycle.DaSystem,datetime.datetime(2000,1,1))
-
-    # This is a test of the CarbonTracker release ct2009 fortran solution and the one implemented here
-
-    #SerialPyAgainstSerialFortran()
-    #sys.exit(0)
-
-    SerialvsBulk()
-    sys.exit(0)
-
diff --git a/da/tm5/__init__.py b/da/tm5/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/da/tm5/observationoperator.py b/da/tm5/observationoperator.py
deleted file mode 100755
index 42a8ef6ef080da9d289a1934b6f80f9d484c43a4..0000000000000000000000000000000000000000
--- a/da/tm5/observationoperator.py
+++ /dev/null
@@ -1,629 +0,0 @@
-#!/usr/bin/env python
-# tm5_tools.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 09 Feb 2009.
-Major modifications to go to a class-based approach, July 2010.
-
-This module holds specific functions needed to use the TM5 model within the data assimilation shell. It uses the information 
-from the DA system in combination with the generic tm5.rc files. 
-
-The TM5 model is now controlled by a python subprocess. This subprocess consists of an MPI wrapper (written in C) that spawns
-a large number ( N= nmembers) of TM5 model instances under mpirun, and waits for them all to finish.
-
-The design of the system assumes that the tm5.x (executable) was pre-compiled with the normal TM5 tools, and is residing in a 
-directory specified by the ${RUNDIR} of a tm5 rc-file. This tm5 rc-file name is taken from the data assimilation rc-file. Thus,
-this python shell does *not* compile the TM5 model for you!
-
-"""
-
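The subprocess-based control pattern described in the docstring above (an MPI wrapper started under mpirun that spawns one TM5 instance per ensemble member and blocks until all have finished) can be sketched in a few lines. This is an illustrative sketch only: the member count, wrapper name and working directory are assumptions, and the real submission goes through the platform job classes used further below.

    import subprocess

    nmembers = 30                                    # assumed ensemble size (da.optimizer.nmembers)
    cmd = ['mpirun', '-np', str(nmembers),
           './tm5_mpi_wrapper', './tm5.x']           # wrapper plus pre-compiled TM5 executable
    code = subprocess.call(cmd)                      # blocks until every member has finished
    if code != 0:
        raise OSError('TM5 ensemble run failed with exit code %d' % code)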
-import os, sys
-import subprocess
-import logging
-import shutil
-
-sys.path.append(os.getcwd())
-
-identifier = 'TM5'
-version = 'release 3.0'
-mpi_shell_filename = 'tm5_mpi_wrapper'
-mpi_shell_location = 'da/bin/'
-
-
-################### Begin Class TM5 ###################
-
-from da.baseclasses.observationoperator import ObservationOperator
-
-class TM5ObservationOperator(ObservationOperator):
-    """ This class holds methods and variables that are needed to run the TM5 model. It is initiated with as only argument a TM5 rc-file
-        location. This rc-file will be used to figure out the settings for the run. 
-        
-        *** This method of running TM5 assumes that a pre-compiled tm5.exe is present, and it will be run from time.start to time.final ***
-        
-        These settings can be modified later. To run a model version, simply compile the model using an existing TM5 rc-file, then 
-        open python, and type:
-
-           []> tm=TM5ObservationOperator('/Users/peters/Modeling/TM5/tutorial.rc')
-           []> tm.WriteRc()
-           []> tm.WriteRunRc()
-           []> tm.Run()
-
-        To use this class inside a data assimilation cycle, a stand-alone method "Initialize()" is included which modifies the TM5
-        settings according to an external dictionary of values to overwrite, and then runs the TM5 model.
-
-    
-    """
-
-    def __init__(self, RcFileName, DaCycle=None):
-        """ The instance of an TMObservationOperator is application dependent """
-        self.Identifier = self.getid()    # the identifier gives the model name
-        self.Version = self.getversion()       # the model version used
-        self.RestartFileList = []
-        self.OutputFileList = []
-        self.RcFileType = 'None'
-        self.outputdir = None # Needed for opening the samples.nc files created 
-
-        self.LoadRc(RcFileName)   # load the specified rc-file
-        self.ValidateRc()         # validate the contents
-
-        # The following code allows the object to be initialized with a DaCycle object already present. Otherwise, it can
-        # be added at a later moment.
-
-        if DaCycle != None:
-            self.DaCycle = DaCycle
-        else:
-            self.DaCycle = {}
-
-        logging.info('Observation Operator initialized: %s (%s)' % (self.Identifier, self.Version))
-
-    def getid(self):
-        return identifier
-
-    def getversion(self):
-        return version
-
-    def Initialize(self):
-        """ 
-        Prepare a forward model TM5 run. This consists of:
-
-          - reading the TM5 rc-file, 
-          - validating it, 
-          - modifying the values,
-          - Creating a tm5_runtime.rc file
-          - Removing the existing tm5.ok file if present
-    
-        """
-        from da.tools.general import CreateLinks
-
-# First reload the original tm5.rc file to get unmodified settings
-
-        self.LoadRc(self.RcFileName)   # load the specified rc-file
-        self.ValidateRc()         # validate the contents
-
-# Create a link from TM5 to the rundirectory of the das system
-
-        sourcedir = self.tm_settings[self.rundirkey]
-        targetdir = os.path.join(self.DaCycle['dir.exec'], 'tm5')
-        CreateLinks(sourcedir, targetdir)
-        self.outputdir = self.tm_settings[self.outputdirkey]
-
-        self.DaCycle['dir.exec.tm5'] = targetdir
-
-# Write a modified TM5 model rc-file in which run/break times are defined by our da system
-
-        NewItems = {
-                    self.timestartkey    : self.DaCycle['time.sample.start']    ,
-                    self.timefinalkey    : self.DaCycle['time.sample.end']    ,
-                    'jobstep.timerange.start'    : self.DaCycle['time.sample.start']    ,
-                    'jobstep.timerange.end'      : self.DaCycle['time.sample.end']    ,
-                    'ct.params.input.dir' : self.DaCycle['dir.input']    ,
-                    'ct.params.input.file' : os.path.join(self.DaCycle['dir.input'], 'parameters')    ,
-                    'output.flask.infile' : self.DaCycle['ObsOperator.inputfile']
-                    }
-
-        if self.DaCycle['time.restart']:  # If this is a restart from a previous cycle, the TM5 model should do a restart
-            NewItems[self.istartkey] = self.restartvalue
-        if self.DaCycle['time.sample.window'] != 0:  # If this is a restart from a previous time step within the filter lag, the TM5 model should do a restart
-            NewItems[self.istartkey] = self.restartvalue
-        
-        # If neither one is true, simply take the istart value from the tm5.rc file that was read
-
-        self.ModifyRC(NewItems)
-        self.WriteRc()
-
-
-    def LoadRc(self, RcFileName):
-        """ 
-        This method loads a TM5 rc-file with settings for this simulation 
-        """
-        import da.tools.rcn as rc
-
-        self.tm_settings = rc.read(RcFileName)
-        self.RcFileName = RcFileName
-        self.Tm5RcLoaded = True
-
-        if 'my.source.dirs' in self.tm_settings.keys():
-            self.RcFileType = 'pycasso'
-        else:
-            self.RcFileType = 'pre-pycasso'
-
-        logging.debug('TM5 rc-file loaded successfully')
-
-
-
-    def ValidateRc(self):
-        """
-        Validate the contents of the tm_settings dictionary and add extra values. The required items for the TM5 rc-file
-        are specified in the tm5_tools module, as dictionary variable "needed_rc_items".
-        """
-        from da.tools.general import ToDatetime
-
-        if self.RcFileType == 'pycasso':
-
-            self.projectkey = 'my.project.dir'
-            self.rundirkey = 'my.run.dir'
-            self.outputdirkey = 'output.dir'
-            self.savedirkey = 'restart.write.dir'
-            self.timestartkey = 'timerange.start'
-            self.timefinalkey = 'timerange.end'
-            self.timelengthkey = 'jobstep.length'
-            self.istartkey = 'istart'
-            self.restartvalue = 33
-
-        else:
-
-            self.projectkey = 'runid'
-            self.rundirkey = 'rundir'
-            self.outputdirkey = 'outputdir'
-            self.savedirkey = 'savedir'
-            self.timestartkey = 'time.start'
-            self.timefinalkey = 'time.final'
-            self.timelengthkey = 'time.break.nday'
-            self.istartkey = 'istart'
-            self.restartvalue = 3
-
-        needed_rc_items = [
-                            self.projectkey    ,
-                            self.rundirkey    ,
-                            self.outputdirkey ,
-                            self.savedirkey   ,
-                            self.timestartkey ,
-                            self.timefinalkey ,
-                            self.timelengthkey,
-                            self.istartkey
-                          ]
-
-        for k, v in self.tm_settings.iteritems():
-            if v == 'True' : self.tm_settings[k] = True
-            if v == 'False': self.tm_settings[k] = False
-            if 'date' in k : self.tm_settings[k] = ToDatetime(v)
-            if 'time.start' in k : 
-                self.tm_settings[k] = ToDatetime(v, fmt='TM5')
-            if 'time.final' in k : 
-                self.tm_settings[k] = ToDatetime(v, fmt='TM5')
-            if 'timerange.start' in k : 
-                self.tm_settings[k] = ToDatetime(v)
-            if 'timerange.end' in k : 
-                self.tm_settings[k] = ToDatetime(v)
-
-        for key in needed_rc_items:
-            if not self.tm_settings.has_key(key):
-                status, msg = (False, 'Missing a required value in rc-file : %s' % key)
-                logging.error(msg)
-                raise IOError, msg
-        status = True
-        logging.debug('rc-file has been validated successfully')
-
-
-
-
-    def ModifyRC(self, NewValues):
-        """ 
-        Modify parts of the tm5 settings, for instance to give control of file locations to the DA shell
-        instead of to the tm5.rc script. 
-
-        Note that we replace these values in all {key,value} pairs of the tm5.rc file!
-
-        """
-    
-        for k, v in NewValues.iteritems():
-            if self.tm_settings.has_key(k):
-                # keep previous value
-                v_orig = self.tm_settings[k]
-                #replace with new
-                self.tm_settings[k] = v
-                #replace all instances of old with new, but only if it concerns a name of a path!!!
-                if os.path.exists(str(v)): 
-                    for k_old, v_old in self.tm_settings.iteritems():
-                        if not isinstance(v_old, str): 
-                            continue
-                        if str(v_orig) in str(v_old): 
-                            v_new = str(v_old).replace(str(v_orig), str(v))
-                            self.tm_settings[k_old] = v_new
-
-                logging.debug('Replaced tm5 rc-item %s ' % k)
-
-            else:
-                self.tm_settings[k] = v
-                logging.debug('Added new tm5 rc-item %s ' % k)
-
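As a worked example of the replacement rule above: if 'my.run.dir' is changed from /scratch/old/run to /scratch/new/run and the new value is an existing path, every other rc-value containing the old path is rewritten too. A minimal stand-alone sketch of that substitution (the directory names are made up):

    settings = {'my.run.dir': '/scratch/old/run',
                'output.dir': '/scratch/old/run/output',
                'restart.write.dir': '/scratch/old/run/restart'}

    old, new = settings['my.run.dir'], '/scratch/new/run'
    settings['my.run.dir'] = new
    for key, val in settings.items():
        if isinstance(val, str) and old in val:
            settings[key] = val.replace(old, new)    # propagate the new path to dependent entries

    # settings['output.dir'] is now '/scratch/new/run/output'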
-
-    def WriteRc(self):
-        """ 
-        Write the rc-file settings to a tm5.rc file in the rundir
-        """
-        import da.tools.rc as rc
-        
-        tm5rcfilename = os.path.join(self.tm_settings[self.rundirkey], 'tm5.rc')
-        rc.write(tm5rcfilename, self.tm_settings)
-        logging.debug("Modified tm5.rc written (%s)" % tm5rcfilename)
-        
-    def WriteRunRc(self):
-        """ 
-        Create the tm5-runtime.rc file which is read by initexit.F90 to control time loop and restart from save files
-        """
-        import da.tools.rc as rc
-
-        tm5rcfilename = os.path.join(self.tm_settings[self.rundirkey], 'tm5_runtime.rc')
-        rc.write(tm5rcfilename, self.tm_settings)
-
-        rc_runtm5 = {}
-        rc_runtm5['year1'] = self.tm_settings[self.timestartkey].year
-        rc_runtm5['month1'] = self.tm_settings[self.timestartkey].month
-        rc_runtm5['day1'] = self.tm_settings[self.timestartkey].day
-        rc_runtm5['hour1'] = self.tm_settings[self.timestartkey].hour
-        rc_runtm5['minu1'] = 0
-        rc_runtm5['sec1'] = 0
-
-        rc_runtm5['year2'] = self.tm_settings[self.timefinalkey].year
-        rc_runtm5['month2'] = self.tm_settings[self.timefinalkey].month
-        rc_runtm5['day2'] = self.tm_settings[self.timefinalkey].day
-        rc_runtm5['hour2'] = self.tm_settings[self.timefinalkey].hour
-        rc_runtm5['minu2'] = 0
-        rc_runtm5['sec2'] = 0
-
-        rc_runtm5[self.istartkey] = self.tm_settings[self.istartkey]
-        rc_runtm5[self.savedirkey] = self.tm_settings[self.savedirkey]
-        rc_runtm5[self.outputdirkey] = self.tm_settings[self.outputdirkey]
-
-        rc.write(tm5rcfilename, rc_runtm5)
-
-        logging.debug("Modified tm5_runtime.rc written (%s)" % tm5rcfilename)
-
-    def ValidateInput(self):
-        """
-        Make sure that parameter files are written to the TM5 inputdir, and that observation lists are present
-        """
-
-        datadir = self.tm_settings['ct.params.input.dir']
-        if not os.path.exists(datadir):
-            msg = "The specified input directory for the TM5 model to read from does not exist (%s), exiting..." % datadir ; logging.error(msg)
-            raise IOError, msg
-
-        datafiles = os.listdir(datadir)
-        obsfile = self.DaCycle['ObsOperator.inputfile']
-
-        if not os.path.exists(obsfile):
-            msg = "The specified obs input file for the TM5 model to read from does not exist (%s), exiting..." % obsfile ; logging.error(msg)
-            raise IOError, msg
-
-
-        for n in range(int(self.DaCycle['da.optimizer.nmembers'])):
-            paramfile = 'parameters.%03d.nc' % n
-            if paramfile not in datafiles:
-                msg = "The specified parameter input file for the TM5 model to read from does not exist (%s), exiting..." % paramfile ; logging.error(msg)
-                raise IOError, msg
-
-        # Next, make sure there is an actual model version compiled and ready to execute
-
-        targetdir = os.path.join(self.tm_settings[self.rundirkey])
-
-        if self.RcFileType == 'pycasso':
-            self.Tm5Executable = os.path.join(targetdir, self.tm_settings['my.basename'] + '.x')
-        else:
-            self.Tm5Executable = os.path.join(targetdir, 'tm5.x')
-
-        if not os.path.exists(self.Tm5Executable):
-            logging.error("Required TM5 executable was not found %s" % self.Tm5Executable)
-            logging.error("Please compile the model with the specified rc-file and the regular TM5 scripts first")
-            raise IOError
-
-    def GetInitialData(self):
-        """ This method places all initial data needed by an ObservationOperator in the proper folder for the model.
-            For TM5, this means copying the save_*.hdf* files to the dir.save directory from which TM5 will read initial
-            concentrations for all tracers. 
-
-            We get the input data from the restart.current directory at 2 times:
-                (1) When the model starts the forecast over nlag cycles
-                (2) When the model starts the advance step over 1 cycle
-
-
-         """
-
-        logging.debug("Moving TM5 model restart data from the restart/current directory to the TM5 save dir")
-
-        # First get the restart data for TM5 from the current restart dir of the filter
-
-        sourcedir = self.DaCycle['dir.restart.current']
-        targetdir = self.tm_settings[self.savedirkey]
-
-        for file in os.listdir(sourcedir):
-            #LU note: this filename handling could be tidied up
-            file = os.path.join(sourcedir, file)
-            if os.path.isdir(file): # skip dirs
-                logging.debug("           [skip] .... %s " % file)
-                continue    
-            #if not file.startswith('save_'):
-            if not file.startswith('TM5_restart'):
-
-                logging.debug("           [skip] .... %s " % file)
-                continue    
-
-            # all okay, copy file
-
-            logging.debug("           [copy] .... %s " % file)
-            shutil.copy(file, file.replace(sourcedir, targetdir))
-
-        logging.debug("All restart data have been copied from the restart/current directory to the TM5 save dir")
-
-
-    def Run(self):
-        """ 
-         Start the TM5 executable. A new log file is started for the TM5 model IO, and then a subprocess is
-         spawned with the tm5_mpi_wrapper and the tm5.x executable. The exit code of the model is caught and
-         only if successful on all processors will execution of the shell continue. 
-         
-        """
-
-        cwd = os.getcwd()
-
-        # From here on, several options should be implemented.
-
-        #
-        # (1) Where an mpi process is forked to do a TM5 instance with N tracers, each an ensemble member
-        #
-        # (2) Where N processes are spawned, each being one TM5 instance representing one member
-        #
-        # (3) Where N/m processes are spawned, each being a TM5 instance that handles m ensemble members
-        #
-        # In principle, it is best to make these processes produce scripts that can be executed stand-alone, or
-        # be submitted to a queue.
-        #
-
-        # Open logfile and spawn model, wait for finish and return code
-
-        # Code for Option (1)
-
-        code = self.TM5_With_N_tracers()
-
-        if code == 0:
-            logging.info('Finished model executable successfully (%s)' % code)
-            self.Status = 'Success'
-        else:
-            logging.error('Error in model executable return code: %s ' % code)
-            self.Status = 'Failed'
-            raise OSError
-
-        # Return to working directory
-
-        os.chdir(cwd)
-
-        return code
-
-    def TM5_under_mpirun(self):
-        """ Method handles the case where a shell runs an MPI process that forks into N TM5 model instances """
-        from string import join
-        import datetime
-
-        DaPlatForm = self.DaCycle.DaPlatForm
-        targetdir = os.path.join(self.tm_settings[self.rundirkey])
-
-        if not os.path.exists(os.path.join(mpi_shell_location, mpi_shell_filename)):
-            logging.error("Cannot find the mpi_shell wrapper needed for completion (%s) in (%s)" % (mpi_shell_filename, mpi_shell_location))
-            logging.error("Please see the %s/readme_wrapper.txt file for instructions to compile it" % mpi_shell_location)
-            raise IOError
-
-        shutil.copy(os.path.join(mpi_shell_location, mpi_shell_filename) , os.path.join(targetdir, mpi_shell_filename))
-
-        # Go to executable directory and start the subprocess, using a new logfile
-
-        os.chdir(targetdir)
-        logging.debug('Changing directory to %s ' % targetdir)
-
-        # Remove the tm5.ok file from a previous run, placed back only if a successful TM5 run is executed
-
-        okfile = 'tm5.ok'
-        if os.path.exists(okfile): 
-            os.remove(okfile)
-
-        nprocesses = self.DaCycle['da.optimizer.nmembers']
-        jobparams = {'jobname': 'tm5',
-                     'jobnodes': 'ncomp %d' % int(nprocesses),
-                     'jobtime': '00:30:00',
-                     'joblog': os.path.join(self.DaCycle['dir.jobs'])
-                     }
-
-        # file ID and names
-        jobid = 'tm5'
-        targetdir = os.path.join(self.DaCycle['dir.exec'])
-        jobfile = os.path.join(targetdir, 'jb.%s.jb' % jobid)
-        logfile = jobfile.replace('.jb', '.log')
-
-        template = DaPlatForm.GetJobTemplate(jobparams, block=True)
-        template += 'cd %s\n' % targetdir
-        template += 'mpirun -np %d %s ./tm5.x\n' % (int(nprocesses), mpi_shell_filename,)
-
-        DaPlatForm.WriteJob(jobfile, template, jobid)
-      
-        logging.info('Submitting job at %s' % datetime.datetime.now())
-        code = DaPlatForm.SubmitJob(jobfile, joblog=jobfile) 
-        logging.info('Resuming job at %s' % datetime.datetime.now())
-        #LU note: the return value of SubmitJob is not used; code is set below from the presence of the tm5.ok file
-        if not os.path.exists(okfile): 
-            code = -1
-        else:
-            code = 0
-
-        return code
-
-
-    def TM5_With_N_tracers(self):
-        """ Method handles the case where one TM5 model instance with N tracers does the sampling of all ensemble members"""
-        from string import join
-        import datetime
-
-        DaPlatForm = self.DaCycle.DaPlatForm
-
-        targetdir = os.path.join(self.tm_settings[self.rundirkey])
-
-        # Go to executable directory and start the subprocess, using a new logfile
-
-        os.chdir(targetdir)
-        logging.debug('Changing directory to %s ' % targetdir)
-
-        # Remove the tm5.ok file from a previous run, placed back only if a successful TM5 run is executed
-
-        okfile = 'tm5.ok'
-        if os.path.exists(okfile): 
-            os.remove(okfile)
-
-        # file ID and names
-        jobid = 'tm5'
-        jobfile = os.path.join(targetdir, 'jb.%s.jb' % jobid)
-        logfile = jobfile.replace('.jb', '.log')
-
-        nprocesses = int(self.DaCycle['da.optimizer.nmembers']) / 5  # Note that we assign 5 tracers to each processor, this seems good for TM5
-        jobparams = {'jobname': 'tm5',
-                     'jobnodes': 'ncomp %d' % int(nprocesses),
-                     'jobtime': '00:30:00',
-                     'joblog': os.path.join(self.DaCycle['dir.jobs'])
-                     }
-
-        template = DaPlatForm.GetJobTemplate(jobparams, block=True)
-        template += 'cd %s\n' % targetdir
-        template += '%s -np %d %s tm5.rc\n' % (self.tm_settings['mpirun.command'], int(nprocesses), self.Tm5Executable,)
-
-        DaPlatForm.WriteJob(jobfile, template, jobid)
-      
-        logging.info('Submitting job at %s' % datetime.datetime.now())
-        code = DaPlatForm.SubmitJob(jobfile, block=True) 
-        logging.info('Resuming job at %s' % datetime.datetime.now())
-        #LU note: as above, code is set from the presence of the tm5.ok file rather than the SubmitJob return value
-        if not os.path.exists(okfile): 
-            code = -1
-        else:
-            code = 0
-
-        return code
-
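For reference, the 5-tracers-per-process heuristic above translates into the following arithmetic and command line; the mpirun command and executable name come from the rc settings, so the literal strings here are illustrative assumptions only.

    nmembers = 30                        # assumed da.optimizer.nmembers
    nprocesses = nmembers // 5           # 5 ensemble tracers per process -> 6 MPI processes
    mpirun_command = 'mpirun'            # assumed value of tm_settings['mpirun.command']
    executable = './tm5.x'               # assumed pre-compiled TM5 executable
    job_line = '%s -np %d %s tm5.rc' % (mpirun_command, nprocesses, executable)
    print(job_line)                      # mpirun -np 6 ./tm5.x tm5.rc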
-    def SaveData(self):
-        """ Copy the TM5 recovery data from the outputdir to the TM5 savedir, also add the restart files to a list of names
-            that is used by the DaCycle object to collect restart data for the filter.
-
-            WP Note: with the new pycasso restart files we no longer need to copy save files from outdir to savedir
-
-            Note 2: also adding the weekly mean flux output to the OutputFileList for later collection
-         """
-
-        sourcedir = os.path.join(self.tm_settings[self.savedirkey])
-        filter = ['%s' % self.tm_settings[self.timefinalkey].strftime('%Y%m%d')]
-
-        logging.debug("Creating a new list of TM5 restart data")
-        logging.debug("           from directory: %s " % sourcedir)     
-        logging.debug("           with filter: %s " % filter)
-
-
-        # Start from empty lists for each TM5 run. Note that these "private" lists from the obs operator are later on appended to the system
-        # lists
-
-        self.RestartFileList = []
-
-        for file in os.listdir(sourcedir):
-            file = os.path.join(sourcedir, file)
-            if os.path.isdir(file): # skip dirs
-                skip = True
-            elif filter == []:      # copy all
-                skip = False        
-            else:                   # check filter
-                skip = True         # default skip
-                for f in filter:
-                    if f in file: 
-                        skip = False # unless in filter
-                        break
-                
-            if skip: 
-                logging.debug("           [skip] .... %s " % file)                                   
-                continue    
-
-            self.RestartFileList.append(file)
-            logging.debug("           [added to restart list] .... %s " % file)
-
-        sourcedir = os.path.join(self.tm_settings[self.outputdirkey])
-        sd_ed = self.DaCycle['time.sample.stamp']
-        filter = ['flask_%s' % sd_ed, 'flux1x1_%s' % sd_ed]
-
-        logging.debug("Creating a new list of TM5 output data to collect")
-        logging.debug("           from directory: %s " % sourcedir)       
-        logging.debug("           with filter: %s " % filter)             
-
-
-
-        # Start from empty lists for each TM5 run. Note that these "private" lists from the obs operator are later on appended to the system
-        # lists
-
-        self.OutputFileList = []
-
-        for file in os.listdir(sourcedir):
-
-            file = os.path.join(sourcedir, file)
-
-            if os.path.isdir(file): # skip dirs
-                skip = True
-            elif filter == []:      # copy all
-                skip = False        
-            else:                   # check filter
-                skip = True         # default skip
-                for f in filter:
-                    if f in file: 
-                        skip = False # unless in filter
-                        break
-                
-            if skip: 
-                logging.debug("           [skip] .... %s " % file)
-                continue    
-
-            self.OutputFileList.append(file)
-            logging.debug("           [added to output list] .... %s " % file)
-
-
-################### End Class TM5 ###################
-                   
-
-if __name__ == "__main__":
-
-    from da.tools.initexit import StartLogger
-    from da.tools.pipeline import JobStart
-    import datetime as dtm
-
-    sys.path.append(os.getcwd())
-
-    StartLogger()
-
-    DaCycle = JobStart([], {'rc':'da.rc'})
-    DaCycle.Initialize()
-    DaCycle['time.sample.start'] = dtm.datetime(2000, 1, 1)
-    DaCycle['time.sample.end'] = dtm.datetime(2000, 1, 2)
-    DaCycle['time.sample.window'] = 0
-
-    tm = TM5ObservationOperator(DaCycle['da.obsoperator.rc'], DaCycle)
-    tm.Initialize()
-    tm.Run()
-    tm.SaveData()
-
-
-
-
-
diff --git a/da/tools/__init__.py b/da/tools/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/da/tools/general.py b/da/tools/general.py
deleted file mode 100755
index e519db3581ce97ff2fca0cc0d8f934d779faf654..0000000000000000000000000000000000000000
--- a/da/tools/general.py
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/env python
-# tools_da.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 03 Oct 2008.
-
-Temporary module to hold classes and methods that are in development
-
-"""
-
-import logging
-import os
-import sys
-import shutil
-import datetime
-import da.tools.rc as rc
-
-def ValidateRC(rcfile,needed_items):
-    """ validate the contents of an rc-file given a dictionary of required keys """
-
-    for k,v in rcfile.iteritems():
-        if v == 'True' : rcfile[k] = True
-        if v == 'False': rcfile[k] = False
-        if 'date' in k : rcfile[k] = datetime.datetime.strptime(v,'%Y-%m-%d %H:%M:%S')
-
-    for key in needed_items:
-
-        if not rcfile.has_key(key):
-            status,msg = ( False,'Missing a required value in rc-file : %s' % key)
-            logging.error(msg)
-            raise IOError,msg
-
-    status,msg = ( True,'rc-file has been validated successfully' )  ; logging.debug(msg)
-
-
-def CreateDirs(dirname,forceclean=False):
-    """ Create a directory and report success, only if non-existent """
-
-    if forceclean:
-        try:
-            shutil.rmtree(dirname)
-        except:
-            pass
-
-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
-        msg='Creating new directory %s' % dirname
-        logging.info(msg)
-    else:
-        msg='Using existing directory %s' % dirname
-        logging.debug(msg)
-    return dirname
-
-def CreateLinks(sourcedir,targetdir):
-    """ Create a symbolic link from, source to target and report success, note
-        that exisiting links are first removed and then recreated """
-
-    if os.path.exists(targetdir):
-        os.unlink(targetdir)
-        msg='Unlinking existing directory %s' % targetdir
-        logging.debug(msg)
-    try:
-        os.symlink(sourcedir,targetdir)
-        msg='Created new link from %s to %s' % (sourcedir, targetdir)
-        logging.debug(msg)
-    except OSError,msg:
-        msg='Failed to create link from %s to %s' % (sourcedir, targetdir)
-        logging.error(msg)
-        raise OSError
-
-    return None
-
-
-
-def AdvanceTime(time_in,interval):
-    """ Advance time_in by a specified interval"""
-
-    time_out=time_in
-
-    if interval == 'month':                       # if monthly, this run will go to the first day of the next month
-        if time_in.month != 12: 
-            time_out = datetime.datetime(time_in.year,time_in.month+1,1,time_in.hour,0,0)
-        else: 
-            time_out = datetime.datetime(time_in.year+1,1,1,time_in.hour,0,0)  # end of year provision
-    elif interval == 'week':
-        time_out = time_in + datetime.timedelta(days=7)
-    elif isinstance(interval,datetime.timedelta):
-        time_out = time_in + interval
-    else:                    # assume that the interval specified is the number of days to run forward before resubmitting
-        time_out = time_in + datetime.timedelta(days=float(interval))
-
-    return time_out
-
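A few illustrative calls to AdvanceTime (dates chosen arbitrarily), showing the month rollover at year end and the other interval types:

    import datetime

    t0 = datetime.datetime(2000, 12, 15, 6)
    AdvanceTime(t0, 'month')                          # -> 2001-01-01 06:00 (end-of-year provision)
    AdvanceTime(t0, 'week')                           # -> 2000-12-22 06:00
    AdvanceTime(t0, datetime.timedelta(hours=36))     # -> 2000-12-16 18:00
    AdvanceTime(t0, 7)                                # -> 2000-12-22 06:00 (interval in days)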
-
-
-def ToDatetime(datestring,fmt=None):
-    """ convert a date string to a datetime object """
-    import datetime
-  
-    if fmt == 'TM5':
-        datestring = '%04s-%02s-%02s %02s:%02s:00'%(datestring[0:4],datestring[4:6],datestring[6:8],datestring[8:10],datestring[10:12])
-    elif fmt == 'pycasso-TM5':
-        pass # Format already compatible
-    else:
-        pass 
-
-
-    try:
-        return datetime.datetime.strptime(datestring,'%Y-%m-%d %H:%M:%S')
-    except:
-        date,time = datestring.split(' ')
-        year,month,day = map(int,date.split('-'))
-        hour,minute,second = map(int,time.split(':'))
-        return datetime.datetime(year,month,day,hour,minute,second)
-
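The 'TM5' format handled above is a compact YYYYMMDDHHMM string that is first expanded to the default layout; a quick sketch of both accepted forms:

    ToDatetime('2000-01-08 00:00:00')                 # -> datetime.datetime(2000, 1, 8, 0, 0)
    ToDatetime('200001080000', fmt='TM5')             # -> datetime.datetime(2000, 1, 8, 0, 0)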
-def ToDectime(dd):
-    """ convert a datetime object to a decimal date """
-    import datetime
-    import calendar
-    from matplotlib.pylab import date2num
-
-    Days0=date2num(datetime.datetime(dd.year,1,1))
-    if calendar.isleap(dd.year):
-        DaysPerYear=366.
-    else:
-        DaysPerYear=365.
-    DayFrac=date2num(dd)
-    return dd.year+(DayFrac-Days0)/DaysPerYear
-
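A worked example of the conversion above: 2000 is a leap year (366 days) and 2000-07-02 falls 183 days after 2000-01-01, so the decimal date is exactly halfway through the year.

    import datetime
    ToDectime(datetime.datetime(2000, 7, 2))          # -> 2000 + 183/366. = 2000.5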
-def StoreData(pickleobject,filename):
-    """ pickle object into a specified file name """
-    import cPickle
-
-    f = open(filename,'wb')
-    dummy = cPickle.dump(pickleobject,f,-1)
-    f.close()
-
-    return None
-
-def RestoreData(filename):
-    """ unpickle object into a specified file name """
-    import cPickle
-
-    f = open(filename,'rb')
-    pickleobject = cPickle.load(f)
-    f.close()
-
-    return pickleobject
-
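A minimal round-trip with the two pickle helpers above (the file name is arbitrary):

    state = {'seed': 42, 'members': 30}
    StoreData(state, 'state.pickle')
    assert RestoreData('state.pickle') == state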
-if __name__ == "__main__":
-    pass
diff --git a/da/tools/initexit.py b/da/tools/initexit.py
deleted file mode 100755
index 8bea9117dd232e83c5e2c3085cdaf816cb09eac5..0000000000000000000000000000000000000000
--- a/da/tools/initexit.py
+++ /dev/null
@@ -1,811 +0,0 @@
-#!/usr/bin/env python
-# da_initexit.py
-
-"""
-.. module:: initexit
-.. moduleauthor:: Wouter Peters 
-
-Revision History:
-File created on 13 May 2009.
-
-The CycleControl class is found in the module :mod:`initexit`. It is derived from the standard python :class:`dictionary` object. It is the only core object of CTDAS that is automatically created in the pipeline; the user (normally) does not need to modify or extend it. The class is created based on options and arguments passed on the command line when submitting your main CTDAS job. 
-
-Valid options are defined in 
-
-.. autofunction:: da.tools.initexit.ParseOptions
-
-With the name of a valid ``rc-file``, the CycleControl object is instantiated and validated. An example rc-file looks
-like this::
-
-    ! Info on the data assimilation cycle
-
-    time.restart        : False                     ! Restart from an existing run T/F
-    time.start          : 2000-01-01 00:00:00       ! Start time of first cycle
-    time.finish         : 2000-01-08 00:00:00       ! End time of last cycle
-    time.cycle          : 7                         ! length of each cycle, 7 means one week
-    time.nlag           : 5                         ! number of cycles in one smoother window
-    dir.da_run          : ${HOME}/tmp/test_da       ! the run directory for your project
-
-    ! Info on the DA system used
-
-    da.system           : CarbonTracker             ! an identifier for your inversion system
-    da.system.rc        : da/rc/carbontracker.rc    ! the settings needed in your inversion system
-
-    ! Info on the forward model to be used
-
-    da.obsoperator         : TM5                                ! an identifier for your observation operator
-    da.obsoperator.rc      : ${HOME}/Modeling/TM5/tm5-ctdas.rc  ! the rc-file needed to run your observation operator
-    da.optimizer.nmembers  : 30                                 ! the number of ensemble members desired in the optimization
-
-The most important methods of the CycleControl object are listed below:
-
-.. autoclass:: da.tools.initexit.CycleControl 
-   :members: Initialize, Finalize,  CollectRestartData, MoveRestartData, 
-             SubmitNextCycle, CleanUpCycle, SetupFileStructure, RecoverRun, RandomSeed
-
-Two important attributes of the CycleControl object are:
-    (1) DaSystem, an instance of a :ref:`dasystem`
-    (2) DaPlatForm, an instance of a :ref:`platform`
-
-Other functions in the module initexit that are related to the control of a DA cycle are:
-
-.. autofunction:: da.tools.initexit.StartLogger 
-.. autofunction:: da.tools.initexit.ValidateOptsArgs 
-
-
-"""
-
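In practice the object is created from parsed command-line options and the name of the main rc-file, much as the stand-alone test drivers elsewhere in this repository do; the file name below is the example value from the rc-file above, not a required path.

    from da.tools.initexit import StartLogger, CycleControl

    StartLogger()
    DaCycle = CycleControl(opts=['-v'], args={'rc': 'da.rc'})
    DaCycle.Initialize()
    print(DaCycle['time.start'])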
-needed_da_items = [
-    'time.start',
-    'time.finish',
-    'time.nlag',
-    'time.cycle',
-    'dir.da_run',
-    'da.system',
-    'da.system.rc',
-    'da.obsoperator',
-    'da.obsoperator.rc',
-    'da.optimizer.nmembers']
-
-# only needed in an earlier implemented where each substep was a separate job
-# validprocesses = ['start','done','samplestate','advance','invert']
-
-import logging
-import os
-import sys
-import shutil
-import datetime
-
-
-
-class CycleControl(dict):
-    """
-    This object controls the CTDAS system flow and functionality.
-    """
-        
-    def __init__(self, opts=[], args={}):
-        """
-        The CycleControl object is instantiated with a set of options and arguments.
-        The list of arguments must contain the name of an existing ``rc-file``. 
-        This rc-file is loaded by method :meth:`~da.tools.initexit.CycleControl.LoadRc` and validated
-        by :meth:`~da.tools.initexit.CycleControl.ValidateRC`
-
-        Options for the CycleControl consist of accepted command line flags or arguments 
-        in :func:`~da.tools.initexit.CycleControl.ParseOptions`
-
-        """
-
-        self.LoadRc(args['rc'])
-        self.ValidateRC()
-        self.opts = opts
-
-        # Add some useful variables to the rc-file dictionary
-
-        self['jobrcfilename'] = self.RcFileName
-        self['dir.da_submit'] = os.getcwd()
-        self['da.crash.recover'] = '-r' in opts
-        self['verbose'] = '-v' in opts
-        self.DaSystem = None # to be filled later
-        self.RestartFileList = [] # List of files needed for restart, to be extended later
-        self.OutputFileList = [] # List of files needed for output, to be extended later
-
-    def __str__(self):
-        """
-        String representation of a CycleControl object
-        """
-
-        msg = "==============================================================="    ; print msg
-        msg = "DA Cycle rc-file is %s" % self.RcFileName                                ; print msg
-        msg = "DA Cycle run directory is %s" % self['dir.da_run']           ; print msg
-        msg = "DA Cycle inverse system is %s" % self['da.system']           ; print msg
-        msg = "DA Cycle obs operator is %s" % self['da.obsoperator']        ; print msg
-        msg = "==============================================================="    ; print msg
-
-        return ""
-
-
-    def LoadRc(self, RcFileName):
-        """ 
-        This method loads a DA Cycle rc-file with settings for this simulation 
-        """
-        import da.tools.rc as rc
-
-        rcdata = rc.read(RcFileName)
-        for k, v in rcdata.iteritems():
-            self[k] = v
-        self.RcFileName = RcFileName
-        self.DaRcLoaded = True
-
-        logging.info('DA Cycle rc-file (%s) loaded successfully' % self.RcFileName)
-
-
-
-    def ValidateRC(self):
-        """ 
-        Validate the contents of the rc-file given a dictionary of required keys. 
-        Currently required keys are :attr:`~da.tools.initexit.needed_da_items`
-        """
-        from da.tools.general import ToDatetime
-
-        for k, v in self.iteritems():
-            if v in ['True', 'true', 't', 'T', 'y', 'yes']:
-                self[k] = True
-            if v in ['False', 'false', 'f', 'F', 'n', 'no']:
-                self[k] = False
-            if 'date' in k : self[k] = ToDatetime(v)
-            if 'time.start' in k : 
-                self[k] = ToDatetime(v)
-            if 'time.end' in k : 
-                self[k] = ToDatetime(v)
-            if 'time.finish' in k : 
-                self[k] = ToDatetime(v)
-
-        for key in needed_da_items:
-            if not self.has_key(key):
-                msge = 'Missing a required value in rc-file : %s' % key
-                logging.error(msge)
-                logging.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ')
-                logging.error('Please note the update on Dec 02 2011: rc-file names for the DaSystem and ObsOperator')
-                logging.error('are from now on specified in the main rc-file (see da/rc/da.rc for an example)')
-                logging.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ')
-                raise IOError, msge
-
-        logging.debug('DA Cycle settings have been validated successfully')
-
-
-    def ParseTimes(self):
-        """ 
-        Parse time related parameters into datetime objects for later use 
-        """
-        from da.tools.general import AdvanceTime
-
-        startdate = self['time.start']
-        finaldate = self['time.finish']                  
-
-        if finaldate <= startdate:
-            logging.error('The start date (%s) is not before the final date (%s), please revise' % (startdate.strftime('%Y%m%d'), finaldate.strftime('%Y%m%d')))
-            raise ValueError
-        #
-        cyclelength = self['time.cycle']                 # get time step
-
-# Determine end date
-
-        if cyclelength == 'infinite':
-            enddate = finaldate
-        else:
-            enddate = AdvanceTime(startdate, cyclelength)
-
-        dt = enddate - startdate
-
-        #
-        if enddate > finaldate:  # do not run beyond finaldate
-            enddate = finaldate
-
-        self['time.start'] = startdate
-        self['time.end'] = enddate
-        self['time.finish'] = finaldate
-        self['cyclelength'] = dt
-
-        logging.info("===============================================================")
-        logging.info("DA Cycle start date is %s" % startdate.strftime('%Y-%m-%d %H:%M'))
-        logging.info("DA Cycle end   date is %s" % enddate.strftime('%Y-%m-%d %H:%M'))
-        logging.info( "DA Cycle final date is %s" % finaldate.strftime('%Y-%m-%d %H:%M'))
-        logging.info("DA Cycle cycle length is %s" % cyclelength)
-        logging.info("DA Cycle restart is %s" % str(self['time.restart']))
-        logging.info("===============================================================")
-
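A worked example of the time arithmetic above, using the values from the example rc-file in the module docstring: with time.start = 2000-01-01 and time.cycle = 7, the first cycle ends on 2000-01-08 and cyclelength becomes a 7-day timedelta.

    import datetime
    from da.tools.general import AdvanceTime

    startdate = datetime.datetime(2000, 1, 1)
    enddate = AdvanceTime(startdate, 7)               # time.cycle = 7 days -> 2000-01-08
    dt = enddate - startdate                          # datetime.timedelta(days=7)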
-
-    def SetSampleTimes(self, lag):
-        """
-        Set the times over which a sampling interval will loop, depending on 
-        the lag. Note that lag falls in the interval [0,nlag-1]
-        """
-        import copy
-
-        # Start from cycle times 
-        self['time.sample.start'] = copy.deepcopy(self['time.start'])
-        self['time.sample.end'] = copy.deepcopy(self['time.end'])
-
-        # Now advance depending on lag
-
-        for l in range(lag):
-            self.AdvanceSampleTimes()
-
-        return None
-
-    def AdvanceSampleTimes(self):
-        """ 
-        Advance sampling start and end time by one cycle interval
-        """
-        from da.tools.general import AdvanceTime
-
-        self['time.sample.start'] = AdvanceTime(self['time.sample.start'], self['cyclelength'].days)
-        self['time.sample.end'] = AdvanceTime(self['time.sample.end'], self['cyclelength'].days)
-    
-
-    def AdvanceCycleTimes(self):
-        """ 
-        Advance cycle start and end time by one cycle interval
-        """
-        from da.tools.general import AdvanceTime
-        
-        self['time.start'] = start = AdvanceTime(self['time.start'], self['cyclelength'].days)
-        self['time.end'] = AdvanceTime(self['time.end'], self['cyclelength'].days)
-        self['dir.output'] = os.path.join(self['dir.da_run'], 'output', start.strftime('%Y%m%d'))
-
-
-
-    def RandomSeed(self, action='read'):
-        """ 
-        Get the randomseed and save it, or read the random seed and set it. The seed is currently stored
-        in a python :mod:`pickle` file, residing in the ``exec`` directory
-
-        """
-        import cPickle
-        import numpy as np
-
-        filename = os.path.join(self['dir.exec'], 'randomseed.pickle')
-
-        if action == 'write':
-            f = open(filename, 'wb')
-            seed = np.random.get_state()
-            cPickle.dump(seed, f, -1)
-            f.close()
-
-            msg = "Saved the random seed generator values to file"
-
-        if action == 'read':
-            f = open(filename, 'rb')
-            seed = cPickle.load(f)
-            np.random.set_state(seed)
-            f.close()
-
-            msg = "Retrieved the random seed generator values from file"
-
-        logging.info(msg)
-
-        self.RestartFileList.append(filename)
-        logging.debug("Added the randomseed.pickle file to the RestartFileList")
-
-
-        return None
-
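The effect of the seed handling above can be reproduced with the numpy and cPickle calls it relies on: save the generator state, draw numbers, restore the state, and draw the same numbers again.

    import cPickle
    import numpy as np

    cPickle.dump(np.random.get_state(), open('randomseed.pickle', 'wb'), -1)
    a = np.random.uniform(size=3)

    np.random.set_state(cPickle.load(open('randomseed.pickle', 'rb')))
    b = np.random.uniform(size=3)
    assert (a == b).all()                             # identical draws after restoring the state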
-    def Initialize(self):
-        """ 
-        This method determines how to proceed with the cycle. Three options are implemented:
-
-            1. *Fresh start*  : set up the required file structure for this simulation and start
-            2. *Restart*      : use latest da_runtime variables from the exec dir and restart
-            3. *Recover*      : restart after crash by getting data from restart/one-ago folder
-
-        The choice that gets executed depends on the presence of 
-
-            # the ``-r`` option on the command line, this triggers a recover
-            # the ``time.restart : True`` option in the da.rc file
-
-        The latter is automatically set if the filter submits the next cycle at the end of the current one, 
-        through method :meth:`~da.tools.initexit.CycleControl.SubmitNextCycle`.
-
-        The specific call tree under each scenario is: 
-
-            1. *Fresh Start*
-                * dummy = :meth:`~da.tools.initexit.CycleControl.SetupFileStructure()`  <- Create directory tree
-            2. *Restart*
-                * dummy = :meth:`~da.tools.initexit.CycleControl.SetupFileStructure()`
-                * dummy = :meth:`~da.tools.initexit.CycleControl.RandomSeed`    <- Read the random seed from file
-            3. *Recover*
-                * dummy = :meth:`~da.tools.initexit.CycleControl.SetupFileStructure()`
-                * dummy = :meth:`~da.tools.initexit.CycleControl.RecoverRun()`          <- Recover files from restart/one-ago dir, reset ``time.start``
-                * dummy = :meth:`~da.tools.initexit.CycleControl.RandomSeed` 
-
-        And is always followed by a call to
-
-            * ParseTimes()
-            * WriteRc('jobfilename')
-
-
-        """        
-
-#
-# case 1: A recover from a previous crash, this is signaled by flag "-r"
-#
-        if self['da.crash.recover']:
-            logging.info("Recovering simulation from data in: %s" % self['dir.da_run'])
-            
-            self.SetupFileStructure()
-            self.RecoverRun()
-            self.RandomSeed('read')
-#
-# case 2: A continuation, this is signaled by rc-item time.restart = True
-#
-        elif self['time.restart']:
-            logging.info("Restarting filter from previous step")
-            
-            self.SetupFileStructure()
-            self.RandomSeed('read')
-#
-# case 3: A fresh start, this is signaled by rc-item time.restart = False
-#
-        elif not self['time.restart']:
-            logging.info("First time step in filter sequence")
-            self.SetupFileStructure()
-            # expand jobrcfilename to include exec dir from now on.
-            self['jobrcfilename'] = os.path.join(self['dir.exec'], os.path.split(self['jobrcfilename'])[-1])
-
-
-        self.ParseTimes()
-        self.WriteRC(self['jobrcfilename'])
-
-        return None
-
-
-    def SetupFileStructure(self):
-        """ 
-        Create file structure needed for data assimilation system.
-        In principle this looks like:
-
-            * ``${da_rundir}``
-            * ``${da_rundir}/input``
-            * ``${da_rundir}/output``
-            * ``${da_rundir}/exec``
-            * ``${da_rundir}/diagnostics``
-            * ``${da_rundir}/analysis``
-            * ``${da_rundir}/jobs``
-            * ``${da_rundir}/restart/current``
-            * ``${da_rundir}/restart/one-ago``
-
-        .. note:: The exec dir will actually be a symlink to the directory where
-                 the observation operator executable lives. This directory is passed through
-                 the ``da.rc`` file. 
-
-        .. note:: The observation input files will be placed in the exec dir,
-                 and the resulting simulated values will be retrieved from there as well.
-
-        """
-        from da.tools.general import CreateDirs
-
-# Create the run directory for this DA job, including I/O structure
-
-        filtertime = self['time.start'].strftime('%Y%m%d')
-
-        self['dir.exec'] = os.path.join(self['dir.da_run'], 'exec')
-        self['dir.input'] = os.path.join(self['dir.da_run'], 'input')
-        self['dir.output'] = os.path.join(self['dir.da_run'], 'output', filtertime)
-        self['dir.diagnostics'] = os.path.join(self['dir.da_run'], 'diagnostics')
-        self['dir.analysis'] = os.path.join(self['dir.da_run'], 'analysis')
-        self['dir.jobs'] = os.path.join(self['dir.da_run'], 'jobs')
-        self['dir.restart'] = os.path.join(self['dir.da_run'], 'restart')
-        self['dir.restart.current'] = os.path.join(self['dir.restart'], 'current')
-        self['dir.restart.oneago'] = os.path.join(self['dir.restart'], 'one-ago')
-
-        CreateDirs(self['dir.da_run'])
-        CreateDirs(os.path.join(self['dir.exec']))
-        CreateDirs(os.path.join(self['dir.input']))
-        CreateDirs(os.path.join(self['dir.output']))
-        CreateDirs(os.path.join(self['dir.diagnostics']))
-        CreateDirs(os.path.join(self['dir.analysis']))
-        CreateDirs(os.path.join(self['dir.jobs']))
-        CreateDirs(os.path.join(self['dir.restart']))
-        CreateDirs(os.path.join(self['dir.restart.current']))
-        CreateDirs(os.path.join(self['dir.restart.oneago']))
-
-        logging.info('Successfully created the file structure for the assimilation job')
-
-
-    def RecoverRun(self):
-        """
-        Prepare a recovery from a crashed run. This consists of: 
-        
-            - copying all data from the restart/one-ago folder (:meth:`~da.tools.initexit.CycleControl.MoveRestartData`),
-            - replacing all ``rc-file`` items with those from the ``da_runtime.rc`` in the restart/current dir 
-            - resetting the seed of the random number generator to the value it had before the crash (:meth:`~da.tools.initexit.CycleControl.RandomSeed`)
-            - replacing the output dir name, since it has the sample time in it...
-            
-        """
-        import da.tools.rc as rc
-        import shutil
-        from da.tools.general import CreateDirs
-
-        # Replace rc-items with those from the crashed run's last rc-file (now in restart.current dir)
-
-        file_rc_rec = os.path.join(self['dir.restart.current'], 'da_runtime.rc')
-        rc_rec = rc.read(file_rc_rec)
-
-        for k, v in rc_rec.iteritems():
-            self[k] = v
-
-        self.ValidateRC()
-
-        logging.debug("Replaced rc-items.... ")
-        logging.debug("Next cycle start date is %s" % self['time.start'])
-
-        # Copy randomseed.pickle file to exec dir
-
-        source = os.path.join(self['dir.restart.current'], 'randomseed.pickle')
-        dest = os.path.join(self['dir.exec'], 'randomseed.pickle')
-        shutil.copy(source, dest)
-
-        logging.debug("Replaced randomseed file with previous cycles' last values")
-
-        # Re-create the output dir for this time step, if needed
-
-        self['dir.output'] = os.path.join(self['dir.da_run'], 'output', self['time.start'].strftime('%Y%m%d'))
-        CreateDirs(os.path.join(self['dir.output']))
-
-    def Finalize(self):
-        """
-        Finalize the da cycle, this means writing the save data and rc-files for the next run. 
-        The following sequence of actions occur:
-
-            * Write the randomseed to file for reuse in next cycle
-            * Write a new ``rc-file`` with ``time.restart : True``, and new ``time.start`` and ``time.end``
-            * Collect all needed data needed for check-pointing (restart from current system state)
-            * Move the previous check pointing data out of the way, and replace with current
-            * Submit the next cycle
-
-        """
-
-        self.RandomSeed('write')
-        self.WriteNewRCfile()
-        self.MoveRestartData(io_option='store')  # Move restart data from current to one-ago
-        self.CollectRestartData()  # Collect restart data for next cycle into a clean restart/current folder
-        self.CollectOutput()  # Collect the output data of this cycle into the output directory
-        self.SubmitNextCycle()
-
-    def CollectOutput(self):
-        """ Collect files that are part of the requested output for this cycle. This function allows users to add files 
-            to a list, and then the system will copy these to the current cycle's output directory.
-            The list of files included is read from the 
-            attribute "OutputFileList" which is a simple list of files that can be appended by other objects/methods that
-            require output data to be saved.
-
-
-        """
-        from da.tools.general import CreateDirs
-
-        targetdir = os.path.join(self['dir.output'])
-
-        CreateDirs(os.path.join(targetdir))
-
-        logging.info("Collecting the required output data")
-        logging.debug("           to   directory: %s " % targetdir)
-
-        for file in set(self.OutputFileList):
-            if os.path.isdir(file): # skip dirs
-                continue
-            if not os.path.exists(file): # skip missing files
-                logging.warning("           [not found] .... %s " % file)
-                continue
-
-            logging.debug("           [copy] .... %s " % file)
-            shutil.copy(file, file.replace(os.path.split(file)[0], targetdir))
-
-
-
-    def CollectRestartData(self):
-        """ Collect files needed for the restart of this cycle in case of a crash, or for the continuation of the next cycle. 
-            All files needed are written to the restart/current directory. The list of files included is read from the 
-            attribute "RestartFileList" which is a simple list of files that can be appended by other objects/methods that
-            require restart data to be saved.
-
-            .. note:: Before collecting the files in the ``RestartFileList``, the restart/current directory will be emptied and
-                     recreated. This prevents files from accumulating in the restart/current and restart/one-ago folders. It 
-                     also means that if a file is missing from the ``RestartFileList``, it will not be available for check-pointing
-                     if your run crashes or dies!
-
-            Currently, the following files are included:
-
-                * The ``da_runtime.rc`` file
-                * The ``randomseed.pickle`` file
-                * The savestate.nc file
-                * The files in the ``ObservationOperator.RestartFileList``, i.e., restart data for the transport model
-
-
-            .. note:: We assume that the restart files for the :ref:`ObservationOperator` 
-                      reside in a separate folder, i.e, the ObservationOperator does *not* write directly to the CTDAS restart dir!
-
-        """
-        from da.tools.general import CreateDirs
-
-        targetdir = os.path.join(self['dir.restart.current'])
-        
-        logging.info("Purging the current restart directory before collecting new data")
-        
-        CreateDirs(os.path.join(targetdir), forceclean=True)
-
-        logging.info("Collecting the required restart data")
-        logging.debug("           to   directory: %s " % targetdir)
-
-        for file in set(self.RestartFileList):
-            if os.path.isdir(file): # skip dirs
-                continue
-            if not os.path.exists(file): # warn about missing files
-                logging.warning("           [not found] .... %s " % file)
-            else:
-                logging.debug("           [copy] .... %s " % file)
-                shutil.copy(file, file.replace(os.path.split(file)[0], targetdir))
-
-
-    def MoveRestartData(self, io_option='restore'):
-        """ 
-        Store or restore model state to/from a restart directory. 
-
-            Two IO options are available:
-
-            (1) io_option = restore    : Get data from restart.oneago directory
-            (2) io_option = store      : Save data to restart.oneago directory
-
-            In case of a 'store' command the restart.oneago folder is re-created so that the contents are empty to begin with.
-
-        """
-        from da.tools.general import CreateDirs
-
-        if io_option not in ['store', 'restore']:
-            raise ValueError, 'Invalid option specified for io_option (%s)' % io_option
-
-        if io_option == 'store':
-
-            targetdir = self['dir.restart.oneago']
-            sourcedir = self['dir.restart.current']
-
-        elif io_option == 'restore':
-
-            sourcedir = self['dir.restart.oneago']
-            targetdir = self['dir.restart.current']
-
-# If "store" is requested, recreate target dir, cleaning the contents 
-
-        if io_option == 'store':
-            CreateDirs(os.path.join(targetdir), forceclean=True)
-
-        logging.debug("Performing a %s of data" % io_option)                                
-        logging.debug("           from directory: %s " % sourcedir)                                
-        logging.debug("           to   directory: %s " % targetdir)                              
-
-        for file in os.listdir(sourcedir):
-            file = os.path.join(sourcedir, file)
-            if not os.path.exists(file):
-                logging.debug("Cannot find requested file to move: %s " % file)
-                sys.exit(2)
-            if os.path.isdir(file): # skip dirs
-                logging.debug("           [skip] .... %s " % file)
-                continue    
-            else:
-                logging.debug("           [copy] .... %s " % file)
-                shutil.copy(file, file.replace(sourcedir, targetdir))
-
-#
-    def WriteNewRCfile(self):
-        """ Write the rc-file for the next DA cycle. 
-
-            .. note:: The start time for the next cycle is the end time of this one, while 
-                      the end time for the next cycle is the current end time + one cycle length. 
-                      
-            The resulting rc-file is written to the ``dir.exec`` so that it can be used when resubmitting the next cycle
-            
-        """
-        import da.tools.rc as rc
-        import copy
-
-        # We make a copy of the current DaCycle object, and modify the start + end dates and restart value
-
-        newDaCycle = copy.deepcopy(self)
-        newDaCycle.AdvanceCycleTimes()
-        newDaCycle['time.restart'] = True
-
-        # Create the name of the rc-file that will hold this new input, and write it
-
-        fname = os.path.join(self['dir.exec'], 'da_runtime.rc')  # current exec dir holds next rc file
-        rc.write(fname, newDaCycle)
-        logging.debug('Wrote new da_runtime.rc (%s) to exec dir' % fname)
-
-        # The rest is info needed for a system restart, so it modifies the current DaCycle object (self)
-
-        self['da.restart.fname'] = fname    # needed for next job template
-        self.RestartFileList.extend([fname])  # current restart list holds next rc file name
-
-        logging.debug('Added da_runtime.rc to the RestartFileList for later collection')
-
-
-    def WriteRC(self, fname):
-        """ Write RC file after each process to reflect updated info """
-        import da.tools.rc as rc
-
-        rc.write(fname, self)
-        logging.debug('Wrote expanded rc-file (%s)' % (fname))
-
-
-    def SubmitNextCycle(self):
-        """ 
-        Submit the next job of a DA cycle. This consists of:
-            * Changing to the working directory from which the job was started initially
-            * Creating a command line to start the master script again with the newly created rc-file
-            * Submitting the jobfile 
-
-        If the end of the cycle series is reached, no new job is submitted.
-
-        """
-        import subprocess
-        import os
-        from string import join
-
-
-        DaPlatForm = self.DaPlatForm
-
-        if self['time.end'] < self['time.finish']:
-
-            # file ID and names
-            jobid = self['time.end'].strftime('%Y%m%d') 
-            targetdir = os.path.join(self['dir.exec'])
-            jobfile = os.path.join(targetdir, 'jb.%s.jb' % jobid)
-            logfile = jobfile.replace('.jb', '.log')
-
-            # Template and commands for job
-            jobparams = {'jobname':"j.%s" % jobid, 'jobtime':'01:30:00'}
-            template = DaPlatForm.GetJobTemplate(jobparams)
-            execcommand = os.path.join(self['dir.da_submit'], sys.argv[0]) 
-            template += 'python %s rc=%s %s >& %s' % (execcommand, self['da.restart.fname'], join(self.opts, ''), logfile,) 
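-            # For illustration only (all values hypothetical), the appended line typically looks like:
-            #   python ./das.py rc=da_runtime.rc -v >& jb.20100108.log
-            # so the resubmitted job restarts the master script with the rc-file written for the next cycle.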
-
-            # write and submit 
-
-            DaPlatForm.WriteJob(jobfile, template, jobid)
-            DaPlatForm.SubmitJob(jobfile, joblog=logfile) 
-        else:
-            logging.info('Final date reached, no new cycle started')
-
-
-
-    def SubmitSubStep(self, stepname):
-        """ 
-        Submit the next substep of a DA cycle. This consists of:
-            * Getting a job template as returned by :meth:`~da.tools.baseclasses.platform.GetJobTemplate`
-            * Adding the lines needed to start the next run with a newly created rc-file
-            * Writing the jobfile as done by :meth:`~da.tools.baseclasses.platform.WriteJob`
-            * Submitting the jobfile as done by :meth:`~da.tools.baseclasses.platform.SubmitJob`
-
-        """
-        import subprocess
-        import os
-        from string import join
-
-        DaPlatForm = self.DaPlatForm
-
-        jobparams = {'jobname':'das.%s' % stepname}
-        template = DaPlatForm.GetJobTemplate(jobparams)
-        template += 'cd %s\n' % os.getcwd()
-        template += '%s rc=%s process=%s %s' % (sys.argv[0], self['jobrcfilename'], stepname, join(self.opts, ''),) 
-        jobfile = DaPlatForm.WriteJob(self, template, stepname)
-        jobid = DaPlatForm.SubmitJob(jobfile) 
-
-#LU cool
-    def CleanUpCycle(self):
-        """
-        Nothing to do here for now
-        """
-
-
-#LU I don't get this
-def StartLogger(level=logging.INFO):
-    """ start the logging of messages to screen"""
-
-# start the logging basic configuration by setting up a log file
-
-    logging.basicConfig(level=level,
-                        format=' [%(levelname)-7s] (%(asctime)s) py-%(module)-20s : %(message)s',
-                        datefmt='%Y-%m-%d %H:%M:%S')
-
-def ParseOptions():
-    """ 
-    Function parses options from the command line and returns the arguments as a dictionary.
-    Accepted command line arguments are:
-
-    ========  =======
-    Argument  Meaning
-    ========  =======
-    -v        verbose output in log files
-    -h        display help
-    -r        start a simulation by recovering from a previous crash
-    ========  =======
-
-    """
-    import getopt
-    import sys
-
-# Parse keywords, the only option accepted so far is the "-h" flag for help
-
-    opts = []
-    args = []
-    try:                                
-        opts, args = getopt.gnu_getopt(sys.argv[1:], "-hrv")
-    except getopt.GetoptError, msg:           
-        logging.error('%s' % msg)
-        sys.exit(2)      
-
-    for options in opts:
-        options = options[0].lower()
-        if options == '-h':
-            print ""
-            print helptext
-            sys.exit(2)      
-        if options == '-r':
-            logging.info('-r flag specified on command line: recovering from crash')
-        if options == '-v':
-            logging.info('-v flag specified on command line: extra verbose output')
-            logging.root.setLevel(logging.DEBUG)
-
-    if opts: 
-        optslist = [item[0] for item in opts]
-    else:
-        optslist = []
-
-# Parse arguments and return as dictionary
-
-    arguments = {}
-    for item in args:
-        #item=item.lower()
-
-# Catch arguments that are passed not in "key=value" format
-
-        if '=' in item:
-            key, arg = item.split('=')
-        else:
-            logging.error('Argument passed without a value (%s), please use key=value format' % item)
-            raise getopt.GetoptError, item
-
-        arguments[key] = arg
-
-
-    return optslist, arguments
-
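-# Illustrative sketch (not part of the original code): for a hypothetical command line
-# "./das.py rc=da.rc -v", ParseOptions() would return roughly
-#
-#   opts -> ['-v']
-#   args -> {'rc': 'da.rc'}
-#
-# which is the (opts, args) pair that ValidateOptsArgs below checks before a cycle starts.
-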
-def ValidateOptsArgs(opts, args):
-    """ 
- Validate the options and arguments passed from the command line before starting the cycle. The validation consists of checking for the presence of an argument "rc", and the existence of
- the specified rc-file.  
- 
-    """
-
-    if not args.has_key("rc"):
-        msg = "There is no rc-file specified on the command line. Please use rc=yourfile.rc"   ; logging.error(msg)
-        raise IOError, msg
-    elif not os.path.exists(args['rc']):
-        msg = "The specified rc-file (%s) does not exist " % args['rc'] ;  logging.error(msg)
-        raise IOError, msg
-
-    # WP not needed anymore
-    #if not args.has_key('process'):
-    #    msg = "There is no process specified on the command line, assuming process=Start"   ; logging.info(msg)
-    #    args['process'] = 'start'
-    #if args['process'].lower() not in validprocesses:
-    #    msg = "The specified process (%s) is not valid"%args['process']   ; logging.error(msg)
-    #    raise IOError,msg
-
-    return opts, args
-
-
-if __name__ == "__main__":
-
-    sys.path.append('../../')
-    opts, args = ParseOptions()
-    print opts
-    print args
-
diff --git a/da/tools/io.py b/da/tools/io.py
deleted file mode 100755
index 9f38c832e1b499167a7790a8de00073a366af5a7..0000000000000000000000000000000000000000
--- a/da/tools/io.py
+++ /dev/null
@@ -1,407 +0,0 @@
-#!/usr/bin/env python
-# io.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 15 Oct 2008.
-File modified for CT data assimilation system in July 2010, Wouter Peters
-
-"""
-import datetime as dt
-from numpy import array
-import os
-
-disclaimer  = "This data belongs to the CarbonTracker project"
-email       = "wouter.peters@wur.nl"
-url         = "http://carbontracker.wur.nl"
-institution = "Wageningen University and Research Center"
-source      = "CarbonTracker release 2.0" 
-conventions = "CF-1.1"
-historytext = 'Created on ' + dt.datetime.now().strftime('%B %d, %Y') + ' by %s' % os.environ['USER']
-
-std_savedict = {'name':'unknown', 'values':None, 'dims':None, 'units':'', 'long_name':'', '_FillValue':float(-999.), 'comment':''}
-
-class CT_CDF(object):
-    """ 
-
-    The CT_CDF object is a wrapper around the NIO functionality offered by NCAR's Nio library. It can read+write data in many formats, including
-    NetCDF, HDF, and GRIB. The CT_CDF class is able to make an instance of an Nio file object, and automatically add (meta)data to it in 
-    a format that complies with CF-1.1 conventions.
-
-    The user can specify a filename when creating an instance of the CT_CDF object, as well as all arguments used by the Nio.open_file function.
-    An object is returned that holds the Nio file handle in self.file, as well as some global attributes we want in each file coming out of 
-    this data assimilation system.
-
-    The methods of the CT_CDF object are:
-
-    AddCTHeader()       : Adds a standard header to the file with information about date, user, system, contact person, and a disclaimer.
-    AddParamsDim()      : Add a dimension called 'nparams'
-    AddLatLonDim()      : Add two dimensions called 'latitude' and 'longitude'
-    AddMembersDim()     : Add a dimension called 'nmembers'
-    AddObsDim()         : Add a dimension called 'nobs'
-    AddLagDim()         : Add a dimension called 'nlag', this one can be unlimited
-    AddDateDim()        : Add a dimension called 'date', this one can be unlimited
-    AddDateDimFormat()  : Add a dimension called 'yyyymmddhhmmss', a 6-integer date format as in yyyy,mm,dd,HH,MM,SS
-    GetVariable()       : return a variable from the dataset by name
-    StandardVar()       : return a dictionary with precooked CF-1.1 information for a dataset to be written
-    inquire_unlimited() : get the length of the unlimited dimension
-    AddData()           : Add data from a dictionary to the file
-    close()             : close the file
-
-    """
-    def __init__(self, filename, method='create', *arguments):
-        import Nio
-
-        if method not in ['read', 'write', 'create']:
-            raise ValueError, 'Method %s is not defined for a CarbonTracker NetCDF file object' % method
-
-        if method == 'read':
-            self.file = Nio.open_file(filename, mode='r', *arguments)
-        elif method == 'write':
-            self.file = Nio.open_file(filename, mode=method, *arguments)
-        elif method == 'create':
-            if os.path.exists(filename): os.remove(filename)
-            self.file = Nio.open_file(filename, mode=method, *arguments)
-
-        if method != 'read': self.AddCTHeader()  # add the standard CarbonTracker header once for writable files
-
-
-    def AddCTHeader(self):
-
-        #
-        setattr(self.file, 'Institution', institution)
-        setattr(self.file, 'Contact', email)
-        setattr(self.file, 'URL', url)
-        setattr(self.file, 'Source', source)
-        setattr(self.file, 'Convention', conventions)
-        setattr(self.file, 'Disclaimer', disclaimer)
-        setattr(self.file, 'History', historytext)
-
-    def AddParamsDim(self, nparams):
-
-        dimparams = self.file.create_dimension('nparameters', nparams)
-
-        return ('nparameters',)
-
-    def AddMembersDim(self, nmembers):
-
-        dimmembers = self.file.create_dimension('nmembers', nmembers)
-
-        return ('nmembers',)
-
-    def AddLagDim(self, nlag=0, unlimited=True):
-
-        if unlimited:
-            dimlag = self.file.create_dimension('nlag', None)
-        else:
-            dimlag = self.file.create_dimension('nlag', nlag)
-#LU what is the point of this?
-        dimlag = ('nlag',)
-
-        # Also create the variable of the same name so it can be queried for length
-        var = self.file.create_variable('nlag', 'i', dimlag)
-
-        return ('nlag',)
-
-    def AddObsDim(self, nobs):
-
-        dimobs = self.file.create_dimension('nobs', nobs)
-
-        return ('nobs',)
-
-    def AddLatLonDim(self, istart=0, iend=360, jstart=0, jend=180):
-        """ 
-        Add dimensions + data for latitude and longitude values of a rectangular 1x1 grid, the
-        scope of the arrays can be limited by using the istart,...,jend integers
-        
-        """
-
-        from numpy import arange, float64
-
-        if 'latitude'  in self.file.dimensions.keys(): return ('latitude', 'longitude',)  # already exists
-
-        lons = -180 + arange(360) * 1.0 + 0.5
-        lats = -90 + arange(180) * 1.0 + 0.5
-        #
-        lats = lats[jstart:jend]
-        lons = lons[istart:iend]
-        #
-        dimlon = self.file.create_dimension('longitude', lons.shape[0])
-        dimlon = ('longitude',)
-        dimlat = self.file.create_dimension('latitude', lats.shape[0])
-        dimlat = ('latitude',)
-
-        savedict = self.StandardVar(varname='latitude')
-        savedict['values'] = lats.tolist()
-        savedict['actual_range'] = (float(lats[0]), float(lats[-1]))
-        savedict['dims'] = dimlat
-        self.AddData(savedict)
-
-        savedict = self.StandardVar(varname='longitude')
-        savedict['values'] = lons.tolist()
-        savedict['actual_range'] = (float(lons[0]), float(lons[-1]))
-        savedict['dims'] = dimlon
-        self.AddData(savedict)
-
-        return dimlat + dimlon
-
-    def AddDateDim(self, ndate=0, unlimited=True):
-        """ Add a date dimension, give it the unlimited length if requested """
-
-        if unlimited:
-            dimdate = self.file.create_dimension('date', None)
-        else:
-            dimdate = self.file.create_dimension('date', ndate)
-
-        dimdate = ('date',)
-
-        # Also create the variable of the same name so it can be queried for length
-        var = self.file.create_variable('date', 'd', dimdate)
-
-        return dimdate
-
-    def AddDateDimFormat(self):
-        """ Add a dimension representing a date format as yyyy/mm/dd/HH/MM/SS"""
-
-        if 'yyyymmddhhmmss'  in self.file.dimensions.keys(): 
-            pass  # already exists
-        else:
-            dummy = self.file.create_dimension('yyyymmddhhmmss', 6)
-        dimdateformat = ('yyyymmddhhmmss',)
-        return dimdateformat
-
-    def AddDim(self, dimname, dimsize):
-
-        if dimname  in self.file.dimensions.keys(): 
-            pass
-        else:
-            newdim = self.file.create_dimension(dimname, dimsize)
-        return (dimname,)
-
-    def has_date(self, dd):
-        """ Check if a passed date (dd) is already present in the dates included in a file """
-
-        if self.file.variables['date'].shape[0] > 0:
-            if dd in self.file.variables['date'][:].tolist():
-                return True
-            else:
-                return False
-        else:
-            return False
-            
-    def GetVariable(self, varname):
-        """ get variable from ncf file"""
-        return self.file.variables[varname][:]
-
-    def StandardVar(self, varname):
-        """ return properties of standard variables """
-        import standardvariables
-
-        if varname in standardvariables.standard_variables.keys():
-            return standardvariables.standard_variables[varname]
-        else:
-            var = std_savedict.copy()
-            var['name'] = varname
-            return var
-
-    def inquire_unlimited(self):
-        """ Get the index and name of the unlimited dimension"""
-
-        try:
-            index = self.file.dimensions.values().index(None)
-            unlimname = self.file.dimensions.keys()[index]
-            unlimlen = self.file.variables[unlimname].shape[0]
-        except:
-            unlimlen = -1
-            unlimname = 'None'
-
-        return (unlimname, unlimlen,)
-
-    def AddData(self, datadict, silent=True):
-        """ 
-        Add data to the Nio file object. This is achieved by passing a data dictionary to this routine that holds all metadata plus 
-        the real data values. For example:
-
-        savedict=std_savedict.copy()
-        savedict['name'] = 'testarray'
-        savedict['dims'] = ('date','latitude',)
-        savedict['count'] = unlimlen+n
-        savedict['values'] = np.arange(180)+n*1.5
-        ncf.AddData(savedict,silent=False)
-
-        this makes a copy of a standard variable dictionary first, and then populates it with (meta)data. The conventions are that
-
-        ['dims']   = a tuple with names of the dimensions
-        ['count']  = an integer number that represents the first dimension along which to write (usually the unlimited dim)
-        ['values'] = a list or array that holds the data values to be written. Note that one cannot pass any other object. 
-
-        The counter for the unlimited dimension is needed because multiple datasets might be added with unlimited dimensions and
-        we don't want the unlimited counter to increase every time. To get the value of the counter (needed when appending data for instance)
-        use
-
-        unlimname,unlim_count = ncf.inquire_unlimited()
-
-        where an integer value is returned for the length, and -1 indicates there is no unlimited dimension
-
-        Note that there are special pre-cooked data dictionaries available in the module standardvariables, with many attributes
-        already specified. For instance:
-
-        savedict=ncf.StandardVar(varname='date')
-        savedict['values']= [100.0+n]
-        savedict['count'] = unlimlen+n
-        savedict['dims']=('date',)
-        ncf.AddData(savedict,silent=False)
-
-        has long_names, comments, and data ranges pre-specified.
-
-        """
-        import numpy as np
-
-        # Make sure the passed dictionary has proper values for the important entries
-
-        if datadict['values'] is None:
-            raise IOError, "The passed data dictionary does not contain values, please provide data to write"
-        if datadict['dims'] is None:
-            raise IOError, "The passed data dictionary does not contain valid dimensions, please provide names of dimensions for the dataset"
-
-        if not isinstance(datadict['values'], (list, np.ndarray,)):
-            raise IOError, "Please pass a list or array to the AddData() method, not a %s object" % datadict['values'].__class__
-
-        # First, try to get the requested dataset number to place next in file. Note that this is an attribute of the 
-        # savedict and has to be specified by the user. If not specified, the 'next' dataset is number 0
-
-        try: 
-            next = datadict['count']
-        except:
-            next = 0
-
-        # Check if the requested variable already exists in the file, if so, get a variable handle directly
-
-        existing_vars = self.file.variables.keys()
-
-        if datadict['name'] in existing_vars:
-
-            var = self.file.variables[datadict['name']] 
-
-        # If the variable name is new, create a new dataset
-
-        else:
-            if not silent: print 'Creating new dataset: ' + datadict['name']
-
-            if datadict.has_key('dtype'):  # datatype is part of the data dictionary passed
-
-                if datadict['dtype'] == 'int':
-                    var = self.file.create_variable(datadict['name'], 'i', datadict['dims'])
-                elif datadict['dtype'] == 'char':
-                    var = self.file.create_variable(datadict['name'], 's1', datadict['dims'])
-                elif datadict['dtype'] == 'double':
-                    var = self.file.create_variable(datadict['name'], 'd', datadict['dims'])
-                else:
-                    var = self.file.create_variable(datadict['name'], 'f', datadict['dims'])
-
-            else:
-                var = self.file.create_variable(datadict['name'], 'd', datadict['dims'])  # default is 'double'
-
-            # All other keys in the datadict are assumed to be variable attributes, except for a few reserved ones
-
-            for k, v in datadict.iteritems(): 
-                if k not in ['name', 'dims', 'values', '_FillValue', 'count']: 
-                    setattr(var, k, v)
-
-        # Now that the variable instance is returned, start to add the data. First figure out if we are dealing with an unlimited dimension
-
-        unlimname, unlimlen = self.inquire_unlimited()
-        has_unlimited_dim = (unlimname != 'None')
-
-        # If so, check if the first dimension of the passed dataset corresponds to the unlimited dimension
-
-        if has_unlimited_dim and var.dimensions[0] == unlimname:
-
-            # Add the data to the unlimited dim, note the special case of passing only one value (a record)
-
-            if len(datadict['values']) == 1:
-                var[next] = datadict['values'][0]
-            else:
-                var[next, :] = datadict['values']
-
-            # Add the data, note the special case of passing only one value (a record)
-
-        else:
-            if len(datadict['values']) == 1:
-                var.assign_value(datadict['values'][0])
-            else:
-                var[:] = datadict['values']
-
-    def close(self):
-        """ close the file object """
-
-        return self.file.close()
-
-def CreateDirs(rundat, dirname):
-
-    dirname = os.path.join(rundat.outputdir, dirname)
-    if not os.path.exists(dirname):
-        print "Creating new output directory " + dirname
-        os.makedirs(dirname)
-    else:
-        print 'Writing files to directory: %s' % (dirname,)
-    return dirname
-
-
-if __name__ == '__main__':
-
-    import sys
-    import os
-    import numpy as np
-
-    sys.path.append('../../')
-
-    import da.tools.standardvariables
-
-    try:
-        os.remove('test.nc')
-    except:
-        pass
-
-    ncf = CT_CDF('test.nc', 'create')
-    dimgrid = ncf.AddLatLonDim()
-    dimdate = ncf.AddDateDim(unlimited=True)
-    #dimlag=ncf.AddLagDim(unlimited=True)
-    #dimidate=ncf.AddDateDimFormat()
-
-    unlimname, unlimlen = ncf.inquire_unlimited()
-
-    if unlimlen < 0: unlimlen = 0
-
-    for n in range(100):
-
-        savedict = ncf.StandardVar(varname='testarray')
-        savedict['dims'] = ('date', 'latitude',)
-        savedict['count'] = unlimlen + n
-        savedict['values'] = np.arange(180) + n * 1.5
-        ncf.AddData(savedict, silent=False)
-
-        savedict = ncf.StandardVar(varname='testarray2')
-        savedict['dims'] = ('date', 'latitude',)
-        savedict['count'] = unlimlen + n
-        savedict['values'] = np.arange(180) - n * 1.5
-        ncf.AddData(savedict, silent=False)
-
-        savedict = ncf.StandardVar(varname='date')
-        savedict['values'] = [100.0 + n]
-        savedict['count'] = unlimlen + n
-        savedict['dims'] = dimdate
-        ncf.AddData(savedict, silent=False)
-
-        #savedict=ncf.StandardVar(varname='testfail')
-        #savedict['values']=range(5)
-        #ncf.AddData(savedict,silent=False)
-
-        print ncf.inquire_unlimited()
-
-    ncf.file.close()
-
diff --git a/da/tools/io4.py b/da/tools/io4.py
deleted file mode 100755
index aa7a6292df21019aeb7edab1e0b1aa316ce5edf4..0000000000000000000000000000000000000000
--- a/da/tools/io4.py
+++ /dev/null
@@ -1,452 +0,0 @@
-#!/usr/bin/env python
-# io.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 15 Oct 2008.
-File modified for CT data assimilation system in July 2010, Wouter Peters
-
-"""
-import standardvariables
-import datetime as dt
-from numpy import array, arange
-import os
-import logging
-import sys
-
-disclaimer  = "This data belongs to the CarbonTracker project"
-email       = "wouter.peters@wur.nl"
-url         = "http://carbontracker.wur.nl"
-institution = "Wageningen University and Research Center"
-source      = "CarbonTracker release 2.0" 
-conventions = "CF-1.1"
-historytext = 'created on '+dt.datetime.now().strftime('%B %d, %Y')+' by %s'%os.environ['USER']
-
-std_savedict={'name':'unknown','values':[],'dims':(0,0,),'units':'','long_name':'','comment':''}
-
-def CT_Read(filename='',method=''):
-    """ read from an HDF or NetCDF file. Function choses itself which type is needed """
-
-    if 'hdf' in filename.split('.'):
-        return CT_HDF(filename,method)
-    elif 'nc' in filename.split('.'):
-        return CT_CDF(filename,method)
-    else:
-        msg = 'Could not determine whether input file was NetCDF or HDF, trying both: ' ; logging.warning(msg)
-        try:
-            return CT_CDF(filename,method)
-        except:
-            return CT_HDF(filename,method)
-
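-# Illustrative examples of the dispatch above (file names are hypothetical):
-#   CT_Read('flux_1x1.nc', 'read')  -> CT_CDF (netCDF4 based)
-#   CT_Read('obspack.hdf', 'read')  -> CT_HDF (pyhdf based)
-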
-import netCDF4
-class CT_CDF(netCDF4.Dataset):
-    """ function opens a NetCDF file for writing of output"""
-
-    def __init__(self,filename, method='read'):
-
-        if method not in ['read','write','create']:
-            raise ValueError, 'Method %s is not defined for a CarbonTracker NetCDF file object' % method
-
-        if method == 'read':
-            #print 'Reading from file'
-            try:
-                super(CT_CDF,self).__init__(filename, 'r') 
-            except RuntimeError: 
-                logging.error('Requested file not found for opening: %s'%filename)
-                logging.info("Exiting")
-                sys.exit(2)
-        elif method == 'write':
-            #print 'Adding to existing file'
-            try:
-                super(CT_CDF,self).__init__(filename, 'a')
-            except:
-                super(CT_CDF,self).__init__(filename, 'w',format='NETCDF4') 
-
-            self.AddCTHeader()
-        elif method == 'create':
-            if os.path.exists(filename): os.remove(filename)
-            super(CT_CDF,self).__init__(filename, 'w',format='NETCDF4') 
-            #super(CT_CDF,self).__init__(filename, 'w',format='NETCDF3_CLASSIC') 
-            self.AddCTHeader()
-
-
-    def AddCTHeader(self):
-
-        #
-        self.setncattr('Institution',institution)
-        self.setncattr('Contact',email)
-        self.setncattr('URL',url)
-        self.setncattr('Source',source)
-        self.setncattr('Convention',conventions)
-        self.setncattr('Disclaimer',disclaimer)
-        self.setncattr('History',historytext)
-
-    def AddParamsDim(self,nparams):
-
-        if 'nparameters' in self.dimensions.keys():
-            pass
-        else:
-            dimparams=self.createDimension('nparameters',size=nparams)
-
-        return ('nparameters',)
-
-    def AddMembersDim(self,nmembers):
-
-        if 'nmembers' in self.dimensions.keys():
-            pass
-        else:
-            dimmembers=self.createDimension('nmembers',size=nmembers)
-
-        return ('nmembers',)
-
-    def AddLagDim(self,nlag,unlimited=True):
-
-        if 'nlag' in self.dimensions.keys():
-            pass
-        else:
-            if unlimited:
-                dimlag = self.createDimension('nlag',size=None)
-            else:
-                dimlag = self.createDimension('nlag',size=nlag)
-
-        return ('nlag',)
-
-    def AddObsDim(self,nobs):
-
-        if 'nobs' in self.dimensions.keys():
-            pass
-        else:
-            dimobs = self.createDimension('nobs',size=nobs)
-
-        return ('nobs',)
-
-    def AddLatLonDim(self,istart=0,iend=360,jstart=0,jend=180):
-
-        from numpy import arange, float64
-
-        if 'latitude'  in self.dimensions.keys(): return ('latitude','longitude',)
-
-        lons=-180+arange(360)*1.0+0.5
-        lats=-90+arange(180)*1.0+0.5
-        #
-        lats=lats[jstart:jend]
-        lons=lons[istart:iend]
-        #
-        dimlon = self.createDimension('longitude',size=lons.shape[0])
-        dimlat = self.createDimension('latitude',size=lats.shape[0])
-
-        savedict=self.StandardVar(varname='latitude')
-        savedict['values']=lats.tolist()
-        savedict['actual_range']=(float(lats[0]),float(lats[-1]))
-        savedict['dims']=('latitude',)
-        self.AddData(savedict)
-
-        savedict=self.StandardVar(varname='longitude')
-        savedict['values']=lons.tolist()
-        savedict['actual_range']=(float(lons[0]),float(lons[-1]))
-        savedict['dims']=('longitude',)
-        self.AddData(savedict)
-
-        return ('latitude','longitude',)
-
-    def AddRegionDim(self,type='eco',dimsize=None):
-
-        from da.analysis.tools_transcom import olsonnams, olsonlabs, transnams, ext_transnams, ext_transcomps
-        from da.analysis.tools_regions import ext_econams, ext_ecocomps
-
-        if type not in ['eco','eco_ext','tc','tc_ext','olson']: 
-            raise ValueError,'Type of dimension for regions requested (%s) is not possible' %type
-
-        dimname='regions_%s' % type
-
-        if dimname in self.dimensions.keys(): 
-            return (dimname,)
-
-        if type == 'olson':
-
-            dim = self.createDimension(dimname,size=len(olsonlabs))
-
-            for i,name in enumerate(olsonnams):
-                att = setattr(self, 'OlsonEcosystem_%03d'%(i+1,), name  )
-
-        elif type == 'tc':
-
-            dim = self.createDimension(dimname,size=len(transnams))
-            for i,name in enumerate(transnams):
-                att = setattr(self, 'TransComRegion_%03d'%(i+1,), name  )
-
-        elif type == 'tc_ext':
-
-            dim = self.createDimension(dimname,size=len(ext_transnams))
-
-            for i,name in enumerate(ext_transnams):
-                    lab='Aggregate_Region_%03d'%(i+1,)
-                    setattr(self,lab,name)
-            for i,name in enumerate(ext_transcomps):
-                    lab='Aggregate_Components_%03d'%(i+1,)
-                    setattr(self,lab,name)
-
-        elif type == 'eco':
-
-            dim = self.createDimension(dimname,size=dimsize)
-
-        return (dimname,)
-
-
-    def AddDateDim(self,unlimited=False):
-
-        if 'date' in self.dimensions.keys():
-            pass
-        else:
-            dimdate = self.createDimension('date',size=None)
-
-        return ('date',)
-
-    def AddDateDimFormat(self):
-
-        if 'yyyymmddhhmmss' not in self.dimensions.keys(): 
-            dimdateformat = self.createDimension('yyyymmddhhmmss',size=6)
-        return ('yyyymmddhhmmss',)
-
-    def AddDim(self,dimname,dimsize):
-
-        if dimname  in self.dimensions.keys(): 
-            pass
-        else:
-            newdim = self.createDimension(dimname,dimsize)
-        return (dimname,)
-
-    def has_date(self,dd):
-
-        if not self.dimensions.has_key('date'): 
-            return False
-        if not self.variables.has_key('date'): 
-            return False
-        if self.dimensions['date'].isunlimited():
-            if dd in self.GetVariable('date').tolist():
-                return True
-            else:
-                return False
-        else:
-            return False
-            
-    def GetVariable(self,varname):
-        """ get variable from ncf file"""
-        return self.variables[varname][:]
-
-    def GetAttribute(self,attname):
-        """ get attribute from ncf file"""
-        return getattr(self,attname)
-
-    def StandardVar(self,varname):
-        """ return properties of standard variables """
-        import standardvariables
-
-        if varname in standardvariables.standard_variables.keys():
-            return standardvariables.standard_variables[varname]
-        else:
-            return standardvariables.standard_variables['unknown']
-
-    def inq_unlimlen(self):
-        """ return lenght of unlimited dimenion(s) """
-
-        unlims=()
-        for dimname, dimobj in self.dimensions.iteritems():
-            if dimobj.isunlimited() : unlims += (len(dimobj),)
-
-        return unlims
-
-    def has_unlimlen(self,dims):
-        """ return T/F whether dimensions include an unlimited dimenion(s) """
-
-        for dimname, dimobj in self.dimensions.iteritems():
-            if dimname in dims:
-                if dimobj.isunlimited() : return True
-
-        return False
-
-    def AddData(self,datadict,nsets=1,silent=True):
-        """ add fields to file, at end of unlimited dimension"""
-        import numpy as np
-
-        existing_vars=self.variables
-
-        try: 
-            next = datadict['count']
-        except:
-            next=0
-
-
-        if existing_vars.has_key(datadict['name']):
-            var = self.variables[datadict['name']] 
-            ndims = var.ndim
-
-            datadict = ConvertCharDims(var,datadict)
-
-            if ndims == 1:
-                var[next:next+nsets]=datadict['values']
-            elif ndims == 2:
-                var[next:next+nsets,:]=datadict['values']
-            elif ndims == 3:
-                var[next:next+nsets,:,:]=datadict['values']
-            elif ndims == 4:
-                var[next:next+nsets,:,:,:]=datadict['values']
-            elif ndims == 5:
-                var[next:next+nsets,:,:,:,:]=datadict['values']
-            else:
-                print 'More than 5 dimensions in array not implemented yet'
-                raise ValueError
-
-        else:
-            if not silent: print 'Creating new dataset: '+datadict['name']
-
-            if datadict.has_key('dtype'):
-                if datadict['dtype'] == 'int':
-                    var = self.createVariable(datadict['name'],'i4',datadict['dims'])#,fill_value=datadict['_FillValue'])
-                elif datadict['dtype'] == 'char':
-                    var = self.createVariable(datadict['name'],'S1',datadict['dims'],fill_value='x')
-                elif datadict['dtype'] == 'float':
-                    var = self.createVariable(datadict['name'],'f4',datadict['dims'])#,fill_value=datadict['_FillValue'])
-                elif datadict['dtype'] == 'double':
-                    var = self.createVariable(datadict['name'],'f8',datadict['dims'])#,fill_value=datadict['_FillValue'])
-                else:
-                    var = self.createVariable(datadict['name'],'f8',datadict['dims'])#,fill_value=datadict['_FillValue'])
-            else:
-                var = self.createVariable(datadict['name'],'f4',datadict['dims'])#,fill_value=datadict['_FillValue'])
-
-            for k,v in datadict.iteritems(): 
-                if k not in ['name','dims','values','_FillValue','count']: 
-                    var.setncattr(k,v)
-
-            if nsets > 1 or self.has_unlimlen(datadict['dims']) == True:
-                ndims = var.ndim
-
-                datadict = ConvertCharDims(var,datadict)
-                if ndims == 1:
-                    var[next:next+nsets]=datadict['values']
-                elif ndims == 2:
-                    var[next:next+nsets,:]=datadict['values']
-                elif ndims == 3:
-                    var[next:next+nsets,:,:]=datadict['values']
-                elif ndims == 4:
-                    var[next:next+nsets,:,:,:]=datadict['values']
-                elif ndims == 5:
-                    var[next:next+nsets,:,:,:,:]=datadict['values']
-                else:
-                    print 'More than 5 dimensions in array not implemented yet'
-                    raise ValueError
-            else:
-                ndims = var.ndim
-
-                datadict = ConvertCharDims(var,datadict)
-
-                var[:] = datadict['values']
-
-
-import pyhdf.SD as hdf
-
-class CT_HDF(hdf.SD):
-    """ function opens a HDF file for reading """
-
-    def __init__(self,filename, method='read'):
-
-        if method in ['write','create']:
-            raise ValueError, 'Method %s is not defined for a CarbonTracker HDF file object' % method
-
-        if method == 'read':
-            #print 'Reading from file'
-            try:
-                super(CT_HDF,self).__init__(filename) 
-            except hdf.HDF4Error: 
-                logging.error('Requested file not found for opening: %s'%filename)
-                logging.info("Exiting")
-                sys.exit(2)
-
-    def GetVariable(self,varname):
-        """ get variable from ncf file"""
-        return self.select(varname).get()
-
-    def GetAttribute(self,attname):
-        """ get attribute from ncf file"""
-        return getattr(self,attname)
-
-    def StandardVar(self,varname):
-        """ return properties of standard variables """
-        import standardvariables
-
-        if varname in standardvariables.standard_variables.keys():
-            return standardvariables.standard_variables[varname]
-        else:
-            return standardvariables.standard_variables['unknown']
-
-    def close(self):
-        """ close file"""
-
-        return self.end()
-
-
-def ConvertCharDims(var,datadict):
-
-    if var.dtype == 'S1': 
-        datalen = len(datadict['values'])
-        dimlen = list(var.shape)
-
-        dimlen.remove(datalen) # string length remaining 
-
-        slen=dimlen[0]
-
-        values = [netCDF4.stringtoarr(d,slen) for d in datadict['values'] ] 
-        datadict['values']  = values
-
-    return datadict
-
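-# Illustrative note on ConvertCharDims (values are hypothetical): for a character variable of shape
-# (2, 10) and datadict['values'] = ['obs_1', 'obs_2'], each string is converted with
-# netCDF4.stringtoarr to a length-10 character array before writing.
-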
-def GetVariable(file,varname):
-    """ get variable from HDF file"""
-    return array(file.select(varname).get())
-
-def createDirs(rundat,dirname):
-
-    dirname=os.path.join(rundat.outputdir,dirname)
-    if not os.path.exists(dirname):
-        print "Creating new output directory "+dirname
-        os.makedirs(dirname)
-    else:
-        print 'Writing files to directory: %s'%(dirname,)
-    return dirname
-
-
-if __name__ == '__main__':
-    import numpy as np
-
-    ncf=CT_CDF('test.nc','create')
-    print ncf.file_format
-    dimmembers=ncf.AddMembersDim(200)
-    dimparams=ncf.AddParamsDim(200)
-
-    dimdate=ncf.AddDateDim()
-    dimidate=ncf.AddDateDimFormat()
-    dimlon,dimlat=ncf.AddLatLonDim()
-
-    savedict=std_savedict.copy()
-    savedict['name']='testvar'
-    savedict['values']=np.zeros((200,2,))+2.0
-    savedict['dims']=('nparameters','date',)
-    ncf.AddData(savedict)
-
-    savedict=std_savedict.copy()
-    savedict['name']='testvar'
-    savedict['values']=np.zeros((200,3,))+3.0
-    savedict['dims']=('nparameters','date',)
-    savedict['count']=2
-    ncf.AddData(savedict,nsets=3)
-
-    ncf.close()
-
-
diff --git a/da/tools/io_cdf.py b/da/tools/io_cdf.py
deleted file mode 100755
index 18592a8130ae151b4cfb7dc88e0e547107ba408d..0000000000000000000000000000000000000000
--- a/da/tools/io_cdf.py
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/usr/bin/env python
-# io.py
-
-"""
-Author : peters 
-
-Revision History:
-File created on 15 Oct 2008.
-File modified for CT data assimilation system in July 2010, Wouter Peters
-
-"""
-import standardvariables
-import pycdf as CDF
-import datetime as dt
-from numpy import array
-import os
-
-disclaimer  = "This data belongs to the CarbonTracker project"
-email       = "wouter.peters@wur.nl"
-url         = "http://carbontracker.wur.nl"
-institution = "Wageningen University and Research Center"
-source      = "CarbonTracker release 2.0" 
-conventions = "CF-1.1"
-historytext = 'Created on '+dt.datetime.now().strftime('%B %d, %Y')+' by %s'%os.environ['USER']
-
-std_savedict={'name':'unknown','values':[],'dims':(0,0,),'units':'','long_name':'','_FillValue':float(-999.),'comment':''}
-
-class CT_CDF(CDF.CDF):
-    """ function opens a NetCDF/HDF/GRIB file for writing of output"""
-    def __init__(self,filename, method='read'):
-
-        if method not in ['read','write','create']:
-            raise ValueError, 'Method %s is not defined for a CarbonTracker NetCDF file object' % method
-
-        if method == 'read':
-            print 'Reading from file'
-            super(CT_CDF,self).__init__(filename, CDF.NC.NOWRITE)
-        elif method == 'write':
-            #print 'Adding to existing file'
-            super(CT_CDF,self).__init__(filename, CDF.NC.WRITE|CDF.NC.CREATE)
-            self.AddCTHeader()
-        elif method == 'create':
-            #print 'Creating new file'
-            super(CT_CDF,self).__init__(filename, CDF.NC.WRITE|CDF.NC.TRUNC|CDF.NC.CREATE)
-            self.AddCTHeader()
-
-
-    def AddCTHeader(self):
-
-        self.automode()
-        #
-        setattr(self,'Institution',institution)
-        setattr(self,'Contact',email)
-        setattr(self,'URL',url)
-        setattr(self,'Source',source)
-        setattr(self,'Convention',conventions)
-        setattr(self,'Disclaimer',disclaimer)
-        setattr(self,'History',historytext)
-
-    def AddParamsDim(self,nparams):
-
-        self.automode()
-        dimparams=self.def_dim('nparameters',nparams)
-
-        return (dimparams,)
-
-    def AddMembersDim(self,nmembers):
-
-        self.automode()
-        dimmembers=self.def_dim('nmembers',nmembers)
-
-        return (dimmembers,)
-
-    def AddLagDim(self,nlag,unlimited=True):
-
-        self.automode()
-        if unlimited:
-            dimlag =self.def_dim('nlag',CDF.NC.UNLIMITED)
-        else:
-            dimlag=self.def_dim('nlag',nlag)
-
-        return (dimlag,)
-
-    def AddObsDim(self,nobs):
-
-        self.automode()
-        dimobs=self.def_dim('nobs',nobs)
-
-        return (dimobs,)
-
-    def AddLatLonDim(self,istart=0,iend=360,jstart=0,jend=180):
-
-        from numpy import arange, float64
-
-        if 'latitude'  in self.dimensions(): return (self.dim('latitude'),self.dim('longitude'),)  # already exists
-
-        lons=-180+arange(360)*1.0+0.5
-        lats=-90+arange(180)*1.0+0.5
-        #
-        lats=lats[jstart:jend]
-        lons=lons[istart:iend]
-        #
-        self.automode()
-        dimlon=self.def_dim('longitude',lons.shape[0])
-        dimlat=self.def_dim('latitude',lats.shape[0])
-
-        savedict=self.StandardVar(varname='latitude')
-        savedict['values']=lats.tolist()
-        savedict['actual_range']=(float(lats[0]),float(lats[-1]))
-        savedict['dims']=(dimlat,)
-        self.AddData(savedict)
-
-        savedict=self.StandardVar(varname='longitude')
-        savedict['values']=lons.tolist()
-        savedict['actual_range']=(float(lons[0]),float(lons[-1]))
-        savedict['dims']=(dimlon,)
-        self.AddData(savedict)
-
-        return (dimlat,dimlon,)
-
-    def AddDateDim(self):
-
-        self.automode()
-        if 'date'  in self.dimensions(): return (self.dim('date'),)
-        return (self.def_dim('date',CDF.NC.UNLIMITED),)
-
-    def AddDateDimFormat(self):
-
-        self.automode()
-        if 'yyyymmddhhmmss'  in self.dimensions(): return (self.dim('yyyymmddhhmmss'),)  # already exists
-        return (self.def_dim('yyyymmddhhmmss',6),)
-
-    def AddDim(self,dimname,dimsize):
-
-        if dimname  in self.dimensions(): 
-            newdim = self.dim(dimname)
-        else:
-            newdim = self.def_dim(dimname,dimsize)
-        return (newdim,)
-
-    def has_date(self,dd):
-
-        if self.inq_unlimlen() > 0:
-            if dd in self.GetVariable('date').tolist():
-                return True
-            else:
-                return False
-        else:
-            return False
-            
-    def GetVariable(self,varname):
-        """ get variable from ncf file"""
-        return array(self.var(varname).get())
-
-    def StandardVar(self,varname):
-        """ return properties of standard variables """
-        import standardvariables
-
-        if varname in standardvariables.standard_variables.keys():
-            return standardvariables.standard_variables[varname]
-        else:
-            return standardvariables.standard_variables['unknown']
-
-    def AddData(self,datadict,nsets=1,silent=True):
-        """ add fields to file, at end of unlimited dimension"""
-
-        existing_vars=self.variables()
-
-        try: 
-            next = datadict['count']
-        except:
-            next=0
-
-
-        if existing_vars.has_key(datadict['name']):
-            var = self.var(datadict['name']) 
-            var[next:next+nsets]=datadict['values']
-        else:
-            if not silent: print 'Creating new dataset: '+datadict['name']
-
-            if datadict.has_key('dtype'):
-                if datadict['dtype'] == 'int':
-                    var = self.def_var(datadict['name'],CDF.NC.INT,datadict['dims'])
-                elif datadict['dtype'] == 'char':
-                    var = self.def_var(datadict['name'],CDF.NC.CHAR,datadict['dims'])
-                elif datadict['dtype'] == 'double':
-                    var = self.def_var(datadict['name'],CDF.NC.DOUBLE,datadict['dims'])
-                else:
-                    var = self.def_var(datadict['name'],CDF.NC.FLOAT,datadict['dims'])
-            else:
-                var = self.def_var(datadict['name'],CDF.NC.FLOAT,datadict['dims'])
-            for k,v in datadict.iteritems(): 
-                if k not in ['name','dims','values','_FillValue','count']: 
-                    setattr(var,k,v)
-            if var.isrecord():
-                var[next:next+nsets]=datadict['values']
-            else:
-                var[:]=datadict['values']
-
-def GetVariable(file,varname):
-    """ get variable from HDF file"""
-    return array(file.select(varname).get())
-
-def CreateDirs(rundat,dirname):
-
-    dirname=os.path.join(rundat.outputdir,dirname)
-    if not os.path.exists(dirname):
-        print "Creating new output directory "+dirname
-        os.makedirs(dirname)
-    else:
-        print 'Writing files to directory: %s'%(dirname,)
-    return dirname
-
-
-if __name__ == '__main__':
-
-    try:
-        os.remove('test.nc')
-    except:
-        pass
-    ncf=CT_CDF('test.nc','create')
-    dimgrid=ncf.AddLatLonDim()
-    dimdate=ncf.AddDateDim()
-    dimidate=ncf.AddDateDimFormat()
-
diff --git a/da/tools/pipeline.py b/da/tools/pipeline.py
deleted file mode 100755
index 8e1247062dbf8fae47c6ddfc6f675d3f08644695..0000000000000000000000000000000000000000
--- a/da/tools/pipeline.py
+++ /dev/null
@@ -1,299 +0,0 @@
-#!/usr/bin/env python
-# pipeline.py
-
-"""
-.. module:: pipeline
-.. moduleauthor:: Wouter Peters 
-
-Revision History:
-File created on 06 Sep 2010.
-
-The pipeline module holds methods that execute consecutive tasks with each of the objects of the DA system. 
-
-"""
-import logging
-import os
-import sys
-import shutil
-import datetime
-import copy
-import da.tools.rc as rc
-
-header = '\n\n    ***************************************   '
-footer = '    *************************************** \n  '
-
-def EnsembleSmootherPipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector, ObsOperator, Optimizer):
-    """ The main point of entry for the pipeline """
-
-# Append current working dir to path
-
-    sys.path.append(os.getcwd())
-
-# Import methods and classes contained in this package
-
-    from da.tools.general import StoreData, RestoreData
-    
-    logging.info(header + "Initializing current cycle" + footer) 
-
-    JobStart(DaCycle, DaSystem, PlatForm, StateVector, Samples, ObsOperator)
-
-    logging.info(header + "starting PrepareState" + footer)            
-
-    PrepareState(DaCycle, StateVector)
-
-    logging.info(header + "starting SampleState" + footer)             
-
-    SampleState(DaCycle, Samples, StateVector, ObsOperator)
-
-    logging.info(header + "starting Invert" + footer)              
-
-    Invert(DaCycle, StateVector, Optimizer)
-
-    logging.info(header + "starting Advance" + footer)           
-
-    Advance(DaCycle, Samples, StateVector, ObsOperator)
-    
-    logging.info(header + "starting SaveAndSubmit" + footer)     
-    
-    SaveAndSubmit(DaCycle, StateVector)
-    
-    logging.info("Cycle finished...exiting pipeline")     
-
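-# Illustrative sketch of how this entry point is typically wired up by a driver script; the class
-# names below are assumptions used for illustration only and are not prescribed by this module:
-#
-#   DaCycle     = CycleControl(opts, args)
-#   PlatForm    = MaunaloaPlatForm()
-#   DaSystem    = CtDaSystem(DaCycle['da.system.rc'])
-#   StateVector = CtStateVector()
-#   Samples     = CtObservations()
-#   ObsOperator = TM5ObservationOperator()
-#   Optimizer   = CtOptimizer()
-#
-#   EnsembleSmootherPipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector, ObsOperator, Optimizer)
-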
-####################################################################################################
-
-def JobStart(DaCycle, DaSystem, DaPlatForm, StateVector, Samples, ObsOperator):
-    """ Set up the job specific directory structure and create an expanded rc-file """
-
-    DaSystem.Initialize()
-    DaSystem.Validate()
-    
-    DaCycle.DaSystem = DaSystem
-    DaCycle.DaPlatForm = DaPlatForm
-
-    DaCycle.Initialize()
-
-    StateVector.DaCycle = DaCycle # also embed object in StateVector so it can access cycle information for I/O etc
-    Samples.DaCycle = DaCycle # also embed object in Samples object so it can access cycle information for I/O etc
-    ObsOperator.DaCycle = DaCycle # also embed object in ObsOperator object so it can access cycle information for I/O etc
-
-
-def PrepareState(DaCycle, StateVector):
-    """ Set up the input data for the forward model: obs and parameters/fluxes"""
-
-    # We now have an empty StateVector object that we need to populate with data. If this is a continuation from a previous cycle, we can read
-    # the previous StateVector values from a NetCDF file in the restart/current directory. If this is the first cycle, we need to populate the StateVector
-    # with new values for each week. After we have constructed the StateVector, it will be propagated by one cycle length so it is ready to be used
-    # in the current cycle
-
-    StateVector.Initialize()
-
-    if not DaCycle['time.restart']:
-
-        # Fill each week from n=1 to n=nlag with a new ensemble
-        for n in range(0, StateVector.nlag):
-            date = DaCycle['time.start'] + datetime.timedelta(days=(n + 0.5) * int(DaCycle['time.cycle']))
-            cov = StateVector.GetCovariance(date)
-            StateVector.MakeNewEnsemble(n + 1, cov)
-
-    else:
-
-        # Read the StateVector data from file
-
-        #filtertime = DaCycle['time.start'].strftime('%Y%m%d')
-        filename = os.path.join(DaCycle['dir.restart.current'], 'savestate.nc')
-        StateVector.ReadFromFile(filename) # by default will read "opt"(imized) variables, and then propagate
-
-        # Now propagate the ensemble by one cycle to prepare for the current cycle
-        StateVector.Propagate()
-
-    # Finally, also write the StateVector to a file so that we can always access the a-priori information
-
-    filename = os.path.join(DaCycle['dir.output'], 'savestate.nc')
-    StateVector.WriteToFile(filename)  # write prior info because StateVector.Isoptimized == False for now
-
-
-def SampleState(DaCycle, Samples, StateVector, ObservationOperator):
-    """ Sample the filter state for the inversion """
-
-
-    # Before a forecast step, save all the data to a save/tmp directory so we can later recover it before the propagation step.
-    # This is especially important for:
-    #  (i) The transport model restart data which holds the background mixing ratios. This is needed to run the model one cycle forward
-    #  (ii) The random numbers (or the seed for the random number generator) so we can recreate the ensembles if needed
-
-    #dummy   = DaCycle.MoveSaveData(io_option='store',save_option='partial',filter=[])
-    #msg     = "All restart data have been copied to the save/tmp directory for future use"    ; logging.debug(msg)
-
-    nlag = int(DaCycle['time.nlag'])
-    logging.info("Sampling model will be run over %d cycles" % nlag)
-
-    for lag in range(nlag):
-#LU wouldn't it be enough to write this tuple differently, without the parentheses? and in general?
-#LU this should be just lag here, not int(lag)
-        logging.info(header + ".....Ensemble Kalman Filter at lag %d" % (int(lag) + 1,)) 
-        SampleOneCycle(DaCycle, Samples, StateVector, ObservationOperator, lag)
-
-    # Optionally, post-processing of the model output can be added that deals for instance with
-    # sub-sampling of time series, vertical averaging, etc.
-
-
-def SampleOneCycle(DaCycle, Samples, StateVector, ObservationOperator, lag):
-    """ Perform all actions needed to sample one cycle """
-    
-    # First set up the information for time start and time end of this sample
-#LU for the first lag in SampleOneCycle we take the initial data from the obs operator
-
-    if lag == 0:
-        ObservationOperator.GetInitialData()
-#LU and what does this do??
-    DaCycle.SetSampleTimes(lag)
-
-    startdate = DaCycle['time.sample.start'] 
-    enddate = DaCycle['time.sample.end'] 
-
-    DaCycle['time.sample.window'] = lag
-    DaCycle['time.sample.stamp'] = "%s_%s" % (startdate.strftime("%Y%m%d%H"), enddate.strftime("%Y%m%d%H"))
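-    # e.g. (hypothetical dates) '2010010100_2010010800' for a one-week sampling window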
-
-    logging.info("New simulation interval set : ")
-    logging.info("                  start date : %s " % startdate.strftime('%F %H:%M'))
-    logging.info("                  end   date : %s " % enddate.strftime('%F %H:%M'))
-    logging.info("                  file  stamp: %s " % DaCycle['time.sample.stamp'])
-
-
-    # Implement something that writes the ensemble member parameter info to file, or manipulates them further into the 
-    # type of info needed in your transport model
-
-    StateVector.WriteMembersToFile(lag + 1)
-    Samples.Initialize()
-    Samples.Validate() 
-    Samples.AddObs() 
-
-    filename = Samples.WriteSampleInfo() 
-
-    # Write filename to DaCycle, and to output collection list
-
-    DaCycle['ObsOperator.inputfile'] = filename
-    DaCycle.OutputFileList.append(filename)
-    logging.debug("Appended Obs filename to DaCycle for collection ")
-
-    # Run the observation operator
-
-    RunForecastModel(DaCycle, ObservationOperator)
-
-    # Add the observation operator restart+output files to the general Output+RestartFileList, only after 'Advance'
-#LU again: if lag == 0
-#LU so this means that if lag == 0, the restart file list is taken from the obs operator
-#LU the output too, and earlier the input filename from the obs operator was also added... so the input is always part of the output
-    if lag == 0:
-        DaCycle.RestartFileList.extend(ObservationOperator.RestartFileList)
-        DaCycle.OutputFileList.extend(ObservationOperator.OutputFileList)
-        logging.debug("Appended ObsOperator restart and output file lists to DaCycle for collection ")
-
-    # Add model-data mismatch to all samples, this *might* use output from the ensemble in the future??
-
-    Samples.AddModelDataMismatch()
-    logging.debug("Added Model Data Mismatch to all samples ")
-
-    # Read forecast model samples that were written to NetCDF files by each member, also add the obs to the statevector
-    # Note that obs will only be added to the statevector if either this is the first step (restart=False), or lag==nlag
-#LU if this is the last lag, then we will add the simulations.
-    if lag == int(DaCycle['time.nlag']) - 1 or DaCycle['time.restart'] == False:
-
-        # We retrieve all model samples from one output file written by the ObsOperator. If the ObsOperator creates
-        # one file per member, some logic needs to be included to merge all files!!!
-         
-        filename = os.path.join(ObservationOperator.outputdir, 'flask_%s.nc' % DaCycle['time.sample.stamp'])
-        Samples.AddSimulations(filename)
-
-        # Give each member a model sample by first copying all samples, and then selecting the data for Member #n
-
-        members = StateVector.EnsembleMembers[lag]
-        for n, Member in enumerate(members):
-#LU is it necessary to do it like that??
-#LU first each member gets the same sample, and then...
-            Member.ModelSample = copy.deepcopy(Samples)
-            # Now subselect the data needed for member #n
-            for Sim in Member.ModelSample.Data:
-                Sim.simulated = Sim.simulated[n]
-        StateVector.nobs += Samples.getlength()
-        logging.debug("Added samples from the observation operator to each member of the StateVector")
-
-    logging.debug("StateVector now carries %d samples" % StateVector.nobs)
-
-
-def Invert(DaCycle, StateVector, Optimizer):
-    """ Perform the inverse calculation """
-
-    dims = (int(DaCycle['time.nlag']),
-                  int(DaCycle['da.optimizer.nmembers']),
-                  int(DaCycle.DaSystem['nparameters']),
-                  StateVector.nobs,)
-
-    Optimizer.Initialize(dims)
-    Optimizer.StateToMatrix(StateVector)
-    Optimizer.WriteDiagnostics(DaCycle, StateVector, type='prior')
-
-    Optimizer.SetLocalization('None')
-
-    if not DaCycle.DaSystem.has_key('opt.algorithm'):
-        logging.info("There was no minimum least squares algorithm specified in the DA System rc file (key : opt.algorithm)")
-        logging.info("...using serial algorithm as default...")
-        Optimizer.SerialMinimumLeastSquares()
-    elif DaCycle.DaSystem['opt.algorithm'] == 'serial':
-        logging.info("Using the serial minimum least squares algorithm to solve ENKF equations")
-        Optimizer.SerialMinimumLeastSquares()
-    elif DaCycle.DaSystem['opt.algorithm'] == 'bulk':
-        logging.info("Using the bulk minimum least squares algorithm to solve ENKF equations")
-        Optimizer.BulkMinimumLeastSquares()
-#LU wouldn't an else raising an error be useful here?
-
-    Optimizer.MatrixToState(StateVector)
-    Optimizer.WriteDiagnostics(DaCycle, StateVector, type='optimized')
-
-    StateVector.isOptimized = True
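-
-# The solver used by Invert() is selected through the DA System rc file. A minimal
-# sketch of such an entry (the key name comes from the code above; the value must be
-# 'serial' or 'bulk', and the serial algorithm is used when the key is absent):
-#
-#   opt.algorithm : serial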
-
-def Advance(DaCycle, Samples, StateVector, ObservationOperator):
-    """ Advance the filter state to the next step """
-
-    # This is the advance of the modeled CO2 state. Optionally, routines can be added to advance the state vector (mean+covariance)
-    # Then, restore model state from the start of the filter
-
-    logging.info("Sampling model will be run over 1 cycle")
-    SampleOneCycle(DaCycle, Samples, StateVector, ObservationOperator, 0)
-
-
-def SaveAndSubmit(DaCycle, StateVector):
-    """ Save the model state and submit the next job """
-
-    filename = os.path.join(DaCycle['dir.output'], 'savestate.nc')
-    
-    StateVector.WriteToFile(filename)
-    DaCycle.RestartFileList.append(filename) # write optimized info because StateVector.isOptimized == False for now
-    DaCycle.Finalize()
-
-def RunForecastModel(DaCycle, ObsOperator):
-    """Prepare and execute a forecast step using an external Fortran model. Note that the flavor of model 
-       used is determined in the very first line where the import statement of module "model" depends on a 
-       setting in your da.rc file. After that choice, the module written specifically for a particular 
-       application takes over. There are a few required variables and methods in such a module:
-       
-       version [variable, string]    : defines the version number of the module, e.g. "1.0"
-       identifier [variable, string] : identifies the model used as a string variable, e.g. "TM5"
-       PrepareExe (method)           : A method that preps the model simulation. It can for instance create an input 
-                                       parameter file for the model (tm5.rc), or execute some scripts needed before the 
-                                       actual execution starts (get_meteo). After this step, a call to the model 
-                                       executable should start a successful simulation
-       Run        (method)           : Start the executable. How this is done depends on your model and might involve 
-                                       submitting a string of jobs to the queue, or simply spawning a subprocess, or ...
-       """
-
-    ObsOperator.Initialize()
-    #LU does that function exist?
-    ObsOperator.ValidateInput() 
-    ObsOperator.Run()
-    ObsOperator.SaveData()
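-
-# A minimal sketch of the model-specific module described in the RunForecastModel
-# docstring above. Only the required names (version, identifier, PrepareExe, Run)
-# come from that docstring; the values and bodies below are placeholders, not an
-# existing implementation:
-#
-#   version    = '1.0'
-#   identifier = 'TM5'
-#
-#   def PrepareExe():
-#       """ Create the model input file (e.g. tm5.rc) and run any pre-processing
-#           scripts (e.g. get_meteo) so the executable can start successfully. """
-#
-#   def Run():
-#       """ Start the executable, e.g. by spawning a subprocess or submitting
-#           a chain of jobs to the queue. """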
-
-
-
-
-
diff --git a/da/tools/rc.py b/da/tools/rc.py
deleted file mode 100755
index 02a1047d915e2a81052e7a963eca5b31bb19d50b..0000000000000000000000000000000000000000
--- a/da/tools/rc.py
+++ /dev/null
@@ -1,1147 +0,0 @@
-#! /usr/bin/env python
-# rc.py
-
-
-# ------------------------------------------------
-# help
-# ------------------------------------------------
-
-"""
-Deal with model settings in `rc` format.
-
-RCFILES
-
-    An rcfile is a text file with key/value pairs separated by a ':', e.g.
-
-      my.flag    :  T
-      my.answer  :  42
-
-    The following functionality is supported:
-
-     * Empty lines are ignored.
-
-     * Comment lines are introduced by a '!' as first character.
-
-     * Long values can be continued on the next line after a '\' as the last character.
-
-     * Include the key/value pairs from another file:
-
-         #include an/other.rc
-
-     * Substitute environment variables when available:
-
-         tm.dir : ${HOME}/TM5/cy3
-
-     * Substitute values of other keys in a line:
-
-         build.dir            :  ${tm.dir}/build
-         grid                 :  glb300x200
-         input.${grid}.path   :  /data/input/${grid}
-
-       Substitutions are allowed in key names as well as in values.
-       The substitutions are performed in a loop until nothing
-       has to be substituted anymore, or some substitutions could
-       not be applied at all; for the latter an error is raised.
-       Values to be substituted can therefore be set either before or
-       after they are used.
-
-       Note that if a key has the same name as an environment variable,
-       the new value will be assigned to the key instead of the value
-       retrieved from the environment:
-
-         HOME      :  /some/other/dir/
-
-     * Substitute some specials:
-
-         ${pid}     # evaluates to the current process id; 
-                    # useful for names of log files etc
-         ${script}  # evaluates to the base name of the calling script, 
-                    # thus without .py etc
-                    
-     * Instead of variables of the form '${..}', other patterns can be
-       specified with the optional 'marks' tuple (see below).
-
-     * Old-style '#eval' lines are still supported:
-
-         #eval RUNDIR = /path/to/mydir
-         tmdir : ${RUNDIR}/TM5/cy3
-
-       In this example, the value of RUNDIR will be evaluated and substituted 
-       in all {key,value} pairs. This feature is obsolete and a warning will 
-       be issued. The proper way to use this is with {key,value} pairs too:
-
-         run.dir   : /path/to/mydir
-         tmdir     : ${run.dir}/TM5/cy3
-         
-     * Comment starting with '!' is stripped from the values.
-       To have a value including exclamation marks, use '\!' but do
-       not expect that the rest of the value is scanned for comment too:
-       
-           my.value      :   -999    ! just an integer value
-           my.message    :   This value has 64 characters \! Count if you don't believe it ...
-
-     * If you trust yourself you might try to use conditional expressions:
-     
-           #if ${my.number} == 1
-           message    : Welcome
-           #else
-           message    : Whatever ...
-           #endif
-           
-       The conditions should be valid python expressions that evaluate to a boolean;
-       value substitutions are performed before evaluation. Examples:
-
-                ${my.runmode} == 4
-                "${my.tracer}" == "CH4"
-
-       Keep it simple! Very complicated and nested if-statements might not be
-       resolved correctly, and are in any case not easy to understand for other users!
-       
-       In the example above, an exception could be raised by the special error expression;
-       everything behind the '#error' mark is displayed as an error message:
-       
-            #error No settings provided for value : ${my.value}
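-
-       Putting several of these features together, a small rcfile could for
-       instance look like (the key names below are only illustrative):
-
-           my.tracer   :  CH4
-           run.dir     :  ${HOME}/runs/${my.tracer}
-           #if "${my.tracer}" == "CH4"
-           my.emis     :  emissions_ch4.nc
-           #else
-           #error no emissions defined for tracer ${my.tracer}
-           #endif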
-
-
-USAGE AS SCRIPT
-
-    Called in script form, the following syntax is supported:
-    
-       rc.py [options] <rcfile> <key>
-       rc.py -h|--help
-       
-    The <rcfile> is read and the value defined by <key> is printed
-    to the standard output.
-
-    Use the --help option for more documentation.
-
-    
-USAGE AS PYTHON MODULE
-
-    Import the module with:
-    
-        import rc
-
-    Initialise by reading all settings in an rcfile,
-    supporting the functionality described in the 'RCFILES' section.
-
-        rcf = RcFile( 'settings.rc' )
-
-    The initialisation accepts some optional arguments.
-    Set the silent flag to True to ignore warnings.
-
-        rcf = RcFile( 'settings.rc', silent=False )
-
-    Use the optional 'marks' tuple to define that variables to be expanded
-    are marked not as '${..}' but rather as '<mark1>..<mark2>' :
-
-        rcf = RcFile( 'settings.rc', marks=('${','}') )
-
-    Test to see if a key is defined:
-
-        if rcf.has_key('my.flag') :
-            print 'value of my flag is : ', rcf['my.flag']
-    
-    Extract a list with all keys:
-    
-        rcf.keys()
-
-    A 'get' function is provided to extract values:
-
-     * by default, the 'get' function returns the value as a str type:
-
-         s = rcf.get('my.value')
-    
-     * a second argument is the name of the python type to which
-       the value is converted:
-    
-         i = rcf.get('my.flag','int')
-
-     * if the return value should be a 'bool', the result is
-         True  for values     : 'True' , 'T', 'yes', or '1' ,
-         and False for value  : 'False', 'F', 'no' , or '0' ;
-       for other values an error is raised;
-     
-     * return a default value if the key is not found:
-
-            rcf.get( 'my.flag', default=False )
-    
-     * print a debug message to the logging system for each extracted key:
-     
-            rcf.get( 'my.flag', verbose=True ) 
-
-    Add a new value, comment is optional:
-
-        rcf.add( 'my.iter', 2, comment='iteration number for restart' )
-
-    Assign a new value to an existing key:
-
-        rcf.replace( 'my.flag', True )
-
-    Scan a character line for all occurrences of ${<key>} and substitute
-    the rc value assigned to <key> :
-
-        line = rcf.substitute( line )
-
-    Write the dictionary (with all variables expanded and included files included)
-    to new file:
-
-         rcf.WriteFile('newfile.rc')
-         
-
-HISTORY
-
-    2008? Andy Jacobson, NOAA
-      Translation to python of original shell script 'go_readrc' .
-    2009-06 Wouter Peters, WUR
-      Support substitution of previously defined variables.
-    2009-06 Arjo Segers, TNO
-      Support include files.
-    2009-09 Arjo Segers, TNO
-      Re-coded into class.
-      Implemented substitution loop.
-    2009-11 Arjo Segers, JRC
-      Added main program to run this file as a shell script.
-      Added replace and substitute routines.
-    2010-03 Arjo Segers, JRC
-      Support simple if-statements.
-      Support comment in values.
-
-"""
-
-
-# ------------------------------------------------
-# classes
-# ------------------------------------------------
-
-
-class RcFile(object) :
-
-    """
-    Class to store settings read from a rcfile.
-    """   
-
-    def __init__( self, filename, silent=False, marks=('${','}') ) :
-
-        """ 
-        
-        Usage:
-        
-          rcf = RcFile( 'settings.rc' [,silent=False] [marks=('${','}')] )
-
-        Read an rc-file and expand all the keys and values into a dictionary.
-        Do not shout messages if silent is set to True. 
-        The 2-item tuple (mark1,mark2) can be used to re-define the default
-        key pattern '${..}' into something else:
-          <mark1>...<mark2>
-
-        """
-
-        # external:
-        import re
-        import os
-        import sys
-        import logging
-        
-        # info ...
-        logging.debug( 'reading rcfile %s ...' % filename )
-
-        # check ...
-        if not os.path.exists(filename) :
-            msg = 'rcfile not found : %s' % filename ; logging.error(msg)
-            raise IOError, msg
-        #endif
-        
-        # store file name:
-        self.filename = filename
-        # store rc-file root directory:
-        self.rootdir = os.path.split(filename)[0]
-        
-        # storage for processed rcfile:
-        self.outfile = []
-        
-        # storage for key/value pairs:
-        self.values = {}
-        
-        # open the specified rc-file:
-        f = open(filename,'r')
-        # store all lines in a list:
-        inpfile = f.readlines()
-        # close:
-        f.close()
-
-        # flags:
-        warned_for_eval = False
-        
-        # pass counter:
-        ipass = 1
-        
-        # loop until all substitutions and inclusions are done:
-        while True :
-        
-            # start again with empty output file:
-            self.outfile = []
-            # init current line:
-            line = ''
-            # assume nothing has to be done after this loop:
-            something_done = False
-            something_to_be_done = False
-            unresolved = []
-                        
-            # stack for conditional evaluation;
-            # each element is a tuple with elements:
-            #   resolved (boolean) true if the if-statement is evaluated
-            #   flag     (boolean) true if the lines below the statement 
-            #              are to be included
-            #   anyflag    (boolean) used to check if any of the 'if' or 'elif' conditions
-            #              in this sequence evaluated to True
-            #   line     (char) description of the line for messages
-            ifstack = []
-
-            #print ''
-            #print '---[pass %i]-------------------------------------' % ipass
-            #for line in inpfile : print line.strip()
-            
-            # loop over lines in input file:
-            iline = -1
-            for inpline in inpfile :
-            
-                # line counter:
-                iline = iline + 1
-                
-                # remove end-of-line character:
-                inpline = inpline.strip()
-                
-                ## DEBUG: display current line ...
-                #print '%4i | %s' % (iline,inpline)
-                
-                #
-                # empty lines
-                #
-
-                # skip empty lines:
-                if len(inpline) == 0 :
-                    # add empty line to output:
-                    self.outfile.append('\n')
-                    # next will be a new line:
-                    line = ''
-                    # next input line:
-                    continue
-                #endif
-                
-                #
-                # comment lines
-                #
-
-                # skip comment:
-                if inpline.startswith('!') :
-                    # add copy to output file:
-                    self.outfile.append( '%s\n' % inpline )
-                    # next will be a new line:
-                    line = ''
-                    # next input line:
-                    continue
-                #endif
-                
-                #
-                # continuation lines
-                #
-
-                # current line has continuation mark '\' at the end ?
-                # then add this input line:
-                if line.endswith('\\') :
-                    # remove continuation character, add input line:
-                    line = line[:-1]+inpline
-                else :
-                    # bright new line:
-                    line = inpline
-                #endif
-
-                # continuation mark ? then next line of input file:
-                if line.endswith('\\') : continue
-                
-                #
-                # line info
-                #
-                
-                # line number and text for messages:
-                line_info = '%6i | %s' % (iline+1,line)
-                
-                #
-                # conditional settings (1)
-                #
-                
-                # is this the begin of a new condition ?
-                mark = '#if'
-                if line.startswith(mark) :
-                    # push temporary flag to stack, will be replaced after evaluation of condition:
-                    ifstack.append( ( False, True, False, line_info ) )
-                    # debug ...
-                    #print 'xxx1 ', ifstack
-                #endif
-
-                mark = '#elif'
-                if line.startswith(mark) :
-                    # check ...
-                    if len(ifstack) == 0 :
-                        logging.error( 'found orphan elif in rcfile on line :' )
-                        logging.error( '  %s' % line_info )
-                        raise Exception
-                    #endif
-                    # remove current top from stack:
-                    resolved,flag,anyflag,msg = ifstack.pop()
-                    # push temporary flag to stack, will be replaced after evaluation of condition:
-                    ifstack.append( ( resolved, True, anyflag, line_info ) )
-                    # debug ...
-                    #print 'xxx1 ', ifstack
-                #endif
-
-                mark = '#else'
-                if line.startswith(mark) :
-                    # check ...
-                    if len(ifstack) == 0 :
-                        logging.error( 'found lonely else in rcfile on line :' )
-                        logging.error( '  %s' % line_info )
-                        raise Exception
-                    #endif
-                    # remove current top from stack:
-                    resolved,flag,anyflag,msg = ifstack.pop()
-                    # get higher level settings:
-                    if len(ifstack) > 0 :
-                        resolved_prev,flag_prev,anyflag_prev,msg_prev = ifstack[-1]
-                    else :
-                        flag_prev = True
-                    #endif
-                    # should next lines be included ?
-                    new_flag = (not flag) and (not anyflag) and flag_prev
-                    # add else block with reversed flag, take into account higher level
-                    ifstack.append( ( resolved, new_flag, False, line_info ) )
-                    # debug ...
-                    #print 'xxx1 ', ifstack
-                    # copy to output:
-                    self.outfile.append( '%s\n' % line )
-                    # next input line:
-                    continue
-                #endif
-
-                # is this the end of a condition ?
-                mark = '#endif'
-                if line.startswith(mark) :
-                    # check ...
-                    if len(ifstack) == 0 :
-                        logging.error( 'found lonely endif in rcfile on line :' )
-                        logging.error( '  %s' % line_info )
-                        raise Exception
-                    #endif
-                    # remove top from stack:
-                    top = ifstack.pop()
-                    # copy to output:
-                    self.outfile.append( '%s\n' % line )
-                    # next input line:
-                    continue
-                #endif
-
-                # within if-statements ?
-                if len(ifstack) > 0 :
-                    # extract current top:
-                    resolved,flag,anyflag,msg = ifstack[-1]
-                    # already resolved ? then check if this line should be skipped:
-                    if resolved and (not flag) :
-                        # skip this line, but include commented version in output:
-                        self.outfile.append( '!%s\n' % line )
-                        # read the next input line:
-                        continue
-                    #endif
-                #endif
-                
-                #
-                # handle '#eval' lines
-                #
-
-                mark = '#eval'        
-                if line.startswith(mark):
-                    # info ..
-                    if not warned_for_eval :
-                        if not silent: logging.warning( 'the #eval statements in rc-files are deprecated, use {key:value} pairs instead' )
-                        warned_for_eval = True
-                    #endif
-                    # add commented copy to output:
-                    self.outfile.append( '!evaluated>>> '+line )
-                    # remove leading mark:
-                    line = line.lstrip(mark)
-                    # multiple settings are separated by ';' :
-                    evals = line.split(';')
-                    # insert in output file:
-                    for k in range(len(evals)) :
-                        # split in key and value:
-                        key,value = evals[k].split('=')
-                        # insert:
-                        self.outfile.append( '%s : %s' % (key,value) )
-                    #endfor
-                    # next input line:
-                    continue
-                #endif
-
-                #
-                # replace ${..} values
-                #
-                
-                # ensure that common marks are evaluated correctly:
-                start_mark = marks[0].replace('{','\{').replace('<','\<').replace('$','\$')
-                close_mark = marks[1].replace('}','\}').replace('>','\>')
-        
-                # set syntax of keywords to be matched, e.g. '${...}' :
-                pattern = start_mark+'[A-Za-z0-9_.]+'+close_mark
-
-                # make a regular expression that matches all variables:
-                rc_varpat = re.compile( pattern )
-
-                # search all matching patterns:
-                pats = re.findall(rc_varpat,line)
-                # counter for unexpanded substitutions:
-                ntobedone = 0
-                # loop over matches:
-                for pat in pats :
-                    # remove enclosing characters:
-                    key = pat.lstrip(start_mark).rstrip(close_mark)
-                    # test some dictionaries for matching key:
-                    if self.values.has_key(key) :
-                        # get previously defined value:
-                        val = self.values[key]
-                        # substitute value:
-                        line = line.replace(pat,val)
-                        # set flag:
-                        something_done = True
-                    elif os.environ.has_key(key) :
-                        # get value from environment:
-                        val = os.environ[key]
-                        # substitute value:
-                        line = line.replace(pat,val)
-                        # set flag:
-                        something_done = True
-                    elif key == 'pid' :
-                        # special value: process id; convert to character:
-                        val = '%i' % os.getpid()
-                        # substitute value:
-                        line = line.replace(pat,val)
-                        # set flag:
-                        something_done = True
-                    elif key == 'script' :
-                        # special value: base name of the calling script, without extension:
-                        script,ext = os.path.splitext(os.path.basename(sys.argv[0]))
-                        # substitute value:
-                        line = line.replace(pat,script)
-                        # set flag:
-                        something_done = True
-                    else :
-                        # could not substitute yet; set flag:
-                        ntobedone = ntobedone + 1
-                        # continue with next substitution:
-                        continue
-                    #endif
-                #endfor  # matched patterns
-                # not all substituted ?
-                if ntobedone > 0 :
-                    # add line to output:
-                    self.outfile.append(line)
-                    # a new pass is needed:
-                    something_to_be_done = True
-                    # store for info messages:
-                    unresolved.append(line)
-                    # next input line:
-                    continue
-                #endif
-
-                #
-                # handle '#include' lines
-                #
-
-                mark = '#include'
-                if line.startswith(mark) :
-                    # remove leading mark, what remains is file to be included:
-                    inc_file = line.lstrip(mark).strip()
-                    # check ...
-                    if not os.path.exists(inc_file) :
-                        inc_file = os.path.join(self.rootdir,inc_file)
-                        logging.debug( 'Added rootdir to requested include: %s' % inc_file )
-
-                    if not os.path.exists(inc_file) :
-                        logging.error( 'include file not found : %s' % inc_file )
-                        msg =  'ERROR - include file not found : %s' % inc_file
-                        raise IOError,msg
-                    #endif
-                    # read content:
-                    inc_f = open( inc_file, 'r' )
-                    inc_rc = inc_f.readlines()
-                    inc_f.close()
-                    # add extra comment for output file:
-                    self.outfile.append( '! >>> %s >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n' % inc_file )
-                    self.outfile.extend( inc_rc )
-                    self.outfile.append( '! <<< %s <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n' % inc_file )
-                    # set flag:
-                    something_done = True
-                    # a new pass is needed:
-                    something_to_be_done = True
-                    # next input line:
-                    continue
-                #endif
-
-
-                #
-                # conditional settings (2)
-                #
-
-                # evaluate conditional expressions:
-                mark1 = '#if'
-                mark2 = '#elif'
-                if line.startswith(mark1) or line.startswith(mark2) :
-                    # remove leading mark, what remains is the logical expression:
-                    if line.startswith(mark2) :
-                        expression = line.lstrip(mark2).strip()
-                    else :
-                        expression = line.lstrip(mark1).strip()
-                    # common mistake is to add a ':' as in python; remove this:
-                    if expression.endswith(':') : expression = expression.rstrip(':').strip()
-                    # evaluate:
-                    try :
-                        flag = eval( expression )
-                    except :
-                        logging.error( 'could not evaluate expression:' )
-                        logging.error( '    %s' % expression )
-                        logging.error( 'on line:' )
-                        logging.error( line_info )
-                        sys.exit(1)
-                    #endtry
-                    # remove temporary top added before during this pass:
-                    tmp_statement,tmp_flag,tmp_anyflag,tmp_msg = ifstack.pop()
-                    # extract current top if necessary:
-                    if len(ifstack) > 0 :
-                        dummy_statement,prev_flag,dummy_anyflag,dummy_msg = ifstack[-1]
-                    else :
-                        prev_flag = True
-                    #endif
-                    # should next lines be included ?
-                    new_flag = prev_flag and tmp_flag and flag
-                    # any if/elif evaluated to true in this sequence ?
-                    new_anyflag = tmp_anyflag or new_flag
-                    # add to stack, now resolved, take into account current flag:
-                    ifstack.append( ( True, new_flag, new_anyflag, line_info ) )
-                    # debug ...
-                    #print 'xxx2 ', ifstack
-                    # copy to output:
-                    self.outfile.append( '%s\n' % line )
-                    # next input line:
-                    continue
-                #endif
-
-                #
-                # error message
-                #
-                
-                # special command to raise an exception:
-                mark = '#error'
-                if line.startswith(mark) :
-                    # remove leading mark, what remains is error message:
-                    msg = line.lstrip(mark).strip()
-                    # display:
-                    logging.error( msg )
-                    # add info:
-                    logging.error( 'error message found on line:' )
-                    logging.error( line_info )
-                    # stop:
-                    raise Exception
-                #endif
-
-                #
-                # checks
-                #
-
-                # common mistake ...
-                if line.startswith('#') :
-                    logging.error( 'line in rcfile starts with "#" but is not an "#include" or other special line;' )
-                    logging.error( 'if it is supposed to be comment, please start with "!" ...' )
-                    logging.error( '  rcfile   : %s' % filename )
-                    logging.error( '  line     : %s' % line )
-                    raise IOError
-                #endif
-
-                # check ...
-                if ':' not in line :
-                    logging.error( 'key/value line should contain a ":"' )
-                    logging.error( '  rcfile   : %s' % filename )
-                    logging.error( '  line     : %s' % line )
-                    raise IOError
-                #endif
-
-                #
-                # add to output
-                #
-
-                # add line to output:
-                self.outfile.append( '%s\n' % line )
-
-                #
-                # add key/value pair
-                #
-                
-                # not if inside an unresolved if-statement ...
-                if len(ifstack) > 0 :
-                    # get top values:
-                    resolved,flag,anyflag,msg = ifstack[-1]
-                    # not resolved yet ? then continue:
-                    if not resolved : continue
-                #endif
-                
-                # split in key and value; 
-                # value might contain ':' too, so at maximum 1 split:
-                key,val = line.split(':',1)
-                
-                # remove comment from value:
-                if '!' in val :
-                    # not if '\!' is in the value ...
-                    if not '\!' in val : val,comment = val.split('!')
-                    # replace all slash-comments:
-                    val = val.replace('\!','!')
-                #endif
-
-                # remove spaces:
-                key = key.strip()
-                val = val.strip()
-
-                # already defined ?
-                if self.values.has_key(key) :
-                    # no problem if values are the same, but otherwise ...
-                    if self.values[key] != val :
-                        logging.error( 'key found twice in "%s" :' % filename )
-                        logging.error( '  %s  : %s' % (key,str(self.values[key])) )
-                        logging.error( '  %s  : %s' % (key,str(val)) )
-                        raise Exception
-                    #endif
-                else :
-                    # store new value:
-                    self.values[key] = val
-                    # set flag:
-                    something_done = True
-                #endif
-
-                # display key and value ...
-                #print '                                --> %s : %s' % (key,val)
-
-            #endfor  # loop over lines in text
-            
-            ## info ...
-            #print '~~~ outfile ~~~~~~~~~~~~~~~~~~~~~~~'
-            #for line in self.outfile : print line.strip()
-            #print '~~~ key/values ~~~~~~~~~~~~~~~~~~~~'
-            #for k,v in self.iteritems() :
-            #    print '%s  :  %s' % (k,v)
-            ##endfor
-            #print '-------------------------------------------------'
-            #print ''
-            
-            # check ...
-            if len(ifstack) > 0 :
-                logging.error( 'unterminated if-statement ; current stack:' )
-                for resolved,flag,anyflag,msg in ifstack : logging.error( msg )
-                logging.error( 'please fix the rcfile or debug this script ...' )
-                raise Exception
-            #endif
-
-            # check ...
-            if something_to_be_done :
-                # check for unterminated loop ...
-                if not something_done :
-                    logging.error( 'could not resolve the following lines in rcfile:' )
-                    for uline in unresolved : logging.error( '    %s' % uline )
-                    logging.error( 'please fix the rcfile or debug this script ...' )
-                    raise Exception
-                #endif
-            else :
-                # finished ...
-                break
-            #endif
-            
-            # for safety ...
-            if ipass == 100 :
-                logging.error( 'resolving rc file has reached pass %i ; something wrong ?' % ipass )
-                raise Exception
-            #endif
-            
-            # new pass:
-            ipass = ipass + 1
-            # renew input:
-            inpfile = self.outfile
-            
-        #endwhile   # something to be done
-        
-    #enddef  # __init__
-    
-    
-    # ***
-    
-    
-    def has_key( self, key ) :
-    
-        # from dictionary:
-        return self.values.has_key(key)
-        
-    #enddef
-    
-    
-    # ***
-    
-    
-    def keys( self ) :
-    
-        # from dictionary:
-        return self.values.keys()
-        
-    #enddef
-    
-    
-    # ***
-
-
-    def get( self, key, totype='', default=None, verbose=False ) :
-    
-        """
-        rcf.get( 'my.value' [,default=None] )
-        Return element 'key' from the dictionary.
-        If the element is not present but a default is specified, then return
-        the default value.
-        If 'verbose' is set to True, a debug message is printed to the logging
-        system about which value is returned for the given key.
-        The optional argument 'totype' defines the conversion to a Python type.
-        If 'totype' is set to 'bool', the return value is the
-        boolean True for values 'T', 'True', 'yes', and '1',
-        and False for 'F', 'False', 'no', or '0' ;
-        for other values, an exception will be raised.
-        """
-        
-        # external:
-        import logging
-        
-        # element found ?
-        if self.values.has_key(key) :
-            # copy value:
-            value = self.values[key]
-            # convert ?
-            if totype == 'bool' :
-                # convert to boolean:
-                if value in ['T','True','yes','1'] :
-                    value = True
-                elif value in ['F','False','no','0'] :
-                    value = False
-                else :
-                    logging.error( "value of key '%s' is not a boolean : %s" % (key,str(value)) )
-                    raise Exception
-                #endif
-            elif len(totype) > 0 :
-                # convert to other type ...
-                value = eval( '%s(%s)' % (totype,value) )
-            #endif
-            # for debugging ...
-            if verbose : logging.debug( 'rc setting "%s" : "%s"' % (key,str(value)) )
-        else :
-            # default value specified ?
-            if default != None :
-                # copy default:
-                value = default
-                # for debugging ...
-                if verbose : logging.debug( 'rc setting "%s" : "%s" (default)' % (key,str(value)) )
-            else :
-                # something wrong ...
-                logging.error( "key '%s' not found in '%s' and no default specified" % (key,self.filename) )
-                raise Exception
-            #endif
-        #endif
-        
-        # ok
-        return value
-        
-    #enddef
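-
-    # Example use (illustrative key names; relies on the 'totype' and 'default'
-    # options described in the docstring above):
-    #
-    #   nlag  = rcf.get('time.nlag', 'int')
-    #   quiet = rcf.get('log.silent', 'bool', default=False)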
-    
-    
-    # ***
-    
-    
-    def replace( self, key, val ) :
-    
-        """
-        Replace a key by a new value.
-        """
-        
-        # external:
-        import logging
-        
-        # search for a line '<key>   : <val>' 
-        # loop over lines in output file:
-        found = False
-        for iline in range(len(self.outfile)) :
-            # extract:
-            line = self.outfile[iline]
-            # skip lines that are no key:value pair for sure ...
-            if ':' not in line : continue
-            # split once at first ':'
-            k,v = line.split(':',1)
-            # match ?
-            if k.strip() == key :
-                # replace line in original file:
-                self.outfile[iline] = '%s : %s\n' % (k,str(val))
-                # replace value:
-                self.values[key] = val
-                # set flag:
-                found = True
-                # found, thus no need to continue:
-                break
-            #endif
-        #endfor  # lines
-        # not found ?
-        if not found :
-            logging.error( 'could not replace key : %s' % key )
-            raise Exception
-        #endif
-        
-        # ok
-        return
-    
-    #enddef
-    
-    
-    # ***
-    
-    
-    def add( self, key, val, comment='' ) :
-    
-        """Add a new key/value pair."""
-        
-        # add lines:
-        self.outfile.append( '\n' )
-        if len(comment) > 0 : self.outfile.append( '! %s\n' % comment )
-        self.outfile.append( '%s : %s\n' % (key,str(val)) )
-
-        # add to dictionary:
-        self.values[key] = val
-        
-        # ok
-        return
-    
-    #enddef
-    
-    
-    # ***
-    
-    
-    def substitute( self, line, marks=('${','}') ) :
-    
-        """
-        Return a line with all '${..}' parts replaced by the corresponding rcfile values.
-        The 2-item tuple (mark1,mark2) can be used to re-define the default
-        key pattern '${..}' into something else:
-          <mark1>...<mark2>
-        """
-        
-        # external:
-        import re
-        
-        # ensure that common marks are evaluated correctly:
-        start_mark = marks[0].replace('{','\{').replace('<','\<').replace('$','\$')
-        close_mark = marks[1].replace('}','\}').replace('>','\>')
-
-        # set syntax of keywords to be matched, e.g. '${...}' :
-        pattern = start_mark+'[A-Za-z0-9_.]+'+close_mark
-
-        # make a regular expression that matches all variables:
-        rc_varpat = re.compile( pattern )
-
-        # search all matching patterns:
-        pats = re.findall(rc_varpat,line)
-        # loop over matches:
-        for pat in pats :
-            # remove enclosing characters:
-            key = pat.lstrip(start_mark).rstrip(close_mark)
-            # test dictionary for matching key:
-            if self.values.has_key(key) :
-                # get previously defined value:
-                val = self.values[key]
-                # substitute value:
-                line = line.replace(pat,val)
-            #endif
-        #endfor  # matched patterns
-
-        # ok
-        return line
-        
-    #enddef
-    
-
-    # ***
-
-
-    def WriteFile( self, filename ) :
-
-        """ write the dictionary to file"""
-
-        # open file for writing:
-        f = open(filename,'w')
-
-        ## loop over key/value pairs:
-        #for k,v in self.iteritems():
-        #    # add line; at least the specified number of characters 
-        #    # is used for the key:
-        #    f.write( '%-20s:%s\n' % (k,v) )
-        ##endfor
-
-        # write processed input:
-        f.writelines( self.outfile )
-        
-        # close file:
-        f.close()
-        
-    #endif
-    
-
-#endclass    # RcFile
-
-def read(rcfilename,silent=False):
-    """ 
-        This method reads an rc-file by making an instance of the RcFile class, and then returns the dictionary of values only. This
-        makes it backwards compatible with older implementations of the rc.py module
-    """
-
-    rcdict = RcFile(rcfilename,silent=silent)
-
-    return rcdict.values
-
-def write( filename, rcdict):
-    """
-        This method writes an rc-file dictionary. This is included to make this module backwards compatible with 
-        older implementations of the rc.py module
-    """
-
-    # open file for writing:
-    f = open(filename,'w')
-
-    # loop over key/value pairs:
-    for k,v in rcdict.items():
-        # add line; at least the specified number of characters 
-        # is used for the key:
-        f.write( '%-20s:%s\n' % (k,v) )
-    #endfor
-
-    # close file:
-    f.close()
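-
-# Example use of the backwards-compatible read/write routines above (a sketch;
-# the file name 'test.rc' and the key 'my.iter' are only illustrative):
-#
-#   rcdict = read('test.rc')              # plain dict with all keys expanded
-#   rcdict['my.iter'] = '2'
-#   write('test_new.rc', rcdict)          # dump the dictionary to a new rc-file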
-
-
-
-# ------------------------------------------------
-# test
-# ------------------------------------------------
-
-
-if __name__ == '__main__':
-
-    # external ...
-    import sys
-    import optparse
-    import logging
-    
-    # extract arguments from sys.argv array:
-    #   0 = name of calling script, 1: = actual arguments
-    args = sys.argv[1:]
-    
-    # set text for 'usage' help line:
-    usage = "\n    %prog <rcfile> <key> [-b|--bool] [--default<=value>]\n    %prog <rcfile> -w|--write\n    %prog -h|--help\n    %prog -d|--doc"
-
-    # initialise the option parser:
-    parser = optparse.OptionParser(usage=usage)
-    
-    # define options:
-    parser.add_option( "-d", "--doc", 
-                         help="print documentation",
-                         dest="doc", action="store_true", default=False )
-    parser.add_option( "-b", "--bool", 
-                         help="return 'True' for values 'T', 'True', 'yes', or '1', and 'False' for 'F', 'False', 'no', or '0'",
-                         dest="boolean", action="store_true", default=False )
-    parser.add_option( "--default", 
-                         help="default value returned if key is not found",
-                         dest="default", action="store" )
-    parser.add_option( "-w", "--write", 
-                         help="write pre-processed rcfile",
-                         dest="write", action="store_true", default=False )
-    
-    # now parse the actual arguments:
-    opts,args = parser.parse_args( args=args )
-    
-    # print documentation ?
-    if opts.doc :
-        print __doc__
-        sys.exit(0)
-    #endif
-    
-    # rcfile argument should be provided:
-    if len(args) < 1 :
-        parser.error("no name of rcfile provided\n")
-    #endif
-    # extract:
-    rcfile = args[0]
-    
-    # read rcfile in dictionary:
-    try :
-        rcf = RcFile(rcfile)
-    except :
-        logging.error( sys.exc_info()[1] )
-        sys.exit(1)
-    #endtry
-    
-    # print pre-processed file ?
-    if opts.write :
-        for line in rcf.outfile : print line.strip()
-        sys.exit(0)
-    #endif
-
-    # key argument should be provided:
-    if len(args) < 2 :
-        parser.error("no name of rckey provided\n")
-    #endif
-    # extract:
-    rckey  = args[1]
-    
-    # key present ?
-    if rcf.has_key(rckey) :
-
-        # print requested value:
-        if opts.boolean :
-            # extract value:
-            flag = rcf.get(rckey,'bool')
-            # print result:
-            if flag :
-                print 'True'
-            else :
-                print 'False'
-            #endif
-        else :
-            # extract value:
-            value = rcf.get(rckey)
-            # display:
-            print value
-        #endif
-        
-    else :
-
-        # default value provided ?
-        if opts.default != None :
-            # display:
-            print opts.default
-        else :
-            print 'ERROR - key "%s" not found in rcfile "%s" and no default specified' % (rckey,rcfile)
-            sys.exit(1)
-        #endif
-
-    #endif
-    
-#endif
-
-
-# ------------------------------------------------
-# end
-# ------------------------------------------------
-
diff --git a/da/tools/rcn.py b/da/tools/rcn.py
deleted file mode 100755
index 8e9c7423e381f5771362f69cb4e3989b693e81fe..0000000000000000000000000000000000000000
--- a/da/tools/rcn.py
+++ /dev/null
@@ -1,1301 +0,0 @@
-#! /usr/bin/env python
-# rc.py
-
-
-# ------------------------------------------------
-# help
-# ------------------------------------------------
-
-"""
-Deal with model settings in `rc` format.
-
-RCFILES
-
-    An rcfile is a text file with key/value pairs separated by a ':', e.g.
-
-      my.flag    :  T
-      my.answer  :  42
-
-    The following functionality is supported:
-
-     * Empty lines are ignored.
-
-     * Comment lines are introduced by a '!' as first character.
-
-     * Long values can be continued on the next line after a '\' as the last character.
-
-     * Include the key/value pairs from another file:
-
-         #include an/other.rc
-
-     * Substitute environment variables when available:
-
-         tm.dir : ${HOME}/TM5/cy3
-
-     * Substitute values of other keys in a line:
-
-         build.dir            :  ${tm.dir}/build
-         grid                 :  glb300x200
-         input.${grid}.path   :  /data/input/${grid}
-
-       Substitutions are allowed in key names as well as in values.
-       The substitutions are performed in a loop until nothing
-       has to be substituted anymore, or some substitutions could
-       not be applied at all; for the latter an error is raised.
-       Values to be substituted can therefore be set either before or
-       after they are used.
-
-       Note that if a key has the same name as an environment variable,
-       the new value will be assigned to the key instead of the value
-       retrieved from the environment:
-
-         HOME      :  /some/other/dir/
-
-     * Substitute some specials:
-
-         ${pid}     # evaluates to the current process id; 
-                    # useful for names of log files etc
-         ${script}  # evaluates to the base name of the calling script, 
-                    # thus without .py etc
-                    
-     * Instead of variables of the form '${..}', other patterns can be
-       specified with the optional 'marks' tuple (see below).
-
-     * Old-style '#eval' lines are still supported:
-
-         #eval RUNDIR = /path/to/mydir
-         tmdir : ${RUNDIR}/TM5/cy3
-
-       In this example, the value of RUNDIR will be evaluated and substituted 
-       in all {key,value} pairs. This feature is obsolete and a warning will 
-       be issued. The proper way to use this is with {key,value} pairs too:
-
-         run.dir   : /path/to/mydir
-         tmdir     : ${run.dir}/TM5/cy3
-         
-     * Comment starting with '!' is stripped from the values.
-       To have a value including exclamation marks, use '\!' but do
-       not expect that the rest of the value is scanned for comment too:
-       
-           my.value      :   -999    ! just an integer value
-           my.message    :   This value has 64 characters \! Count if you don't believe it ...
-
-     * If you trust yourself you might try to use conditional expressions:
-     
-           #if ${my.number} == 1
-           message    : Welcome
-           #else
-           message    : Whatever ...
-           #endif
-           
-       The conditions should be valid python expressions that evaluate to a boolean;
-       value substitutions are performed before evaluation. Examples:
-
-                ${my.runmode} == 4
-                "${my.tracer}" == "CH4"
-
-       Keep it simple! Very complicated and nested if-statements might not be
-       resolved correctly, and are in any case not easy to understand for other users!
-       
-       In the example above, an exception could be raised by the special error expression;
-       everything behind the '#error' mark is displayed as an error message:
-       
-            #error No settings provided for value : ${my.value}
-
-
-USAGE AS SCRIPT
-
-    Called in script form, the following syntax is supported:
-    
-       rc.py [options] <rcfile> <key>
-       rc.py -h|--help
-       
-    The <rcfile> is read and the value defined by <key> is printed
-    to the standard output.
-
-    Use the --help option for more documentation.
-
-    
-USAGE AS PYTHON MODULE
-
-    Import the module with:
-    
-        import rc
-
-    Initialise by reading all settings in an rcfile,
-    supporting the functionality described in the 'RCFILES' section.
-
-        rcf = RcFile( 'settings.rc' )
-
-    The initialisation accepts some optional arguments.
-    Set the silent flag to True to ignore warnings.
-
-        rcf = RcFile( 'settings.rc', silent=False )
-
-    Use the optional 'marks' tuple to define that variables to be expanded
-    are marked not as '${..}' but rather as '<mark1>..<mark2>' :
-
-        rcf = RcFile( 'settings.rc', marks=('${','}') )
-
-    Test to see if a key is defined:
-
-        if rcf.has_key('my.flag') :
-            print 'value of my flag is : ', rcf['my.flag']
-    
-    Extract a list with all keys:
-    
-        rcf.keys()
-
-    A 'get' function is provided to extract values:
-
-     * by default, the 'get' function returns the value as a str type:
-
-         s = rcf.get('my.value')
-    
-     * a second argument is the name of the python type to which
-       the value is converted:
-    
-         i = rcf.get('my.flag','int')
-
-     * if the return value should be a 'bool', the result is
-         True  for values     : 'True' , 'T', 'yes', or '1' ,
-         and False for value  : 'False', 'F', 'no' , or '0' ;
-       for other values an error is raised;
-     
-     * return a default value if the key is not found:
-
-            rcf.get( 'my.flag', default=False )
-    
-     * print a debug message to the logging system for each extracted key:
-     
-            rcf.get( 'my.flag', verbose=True ) 
-
-    Add a new value, comment is optional:
-
-        rcf.add( 'my.iter', 2, comment='iteration number for restart' )
-
-    Assign a new value to an existing key:
-
-        rcf.replace( 'my.flag', True )
-
-    Scan a character line for all occurrences of ${<key>} and substitute
-    the rc value assigned to <key> :
-
-        line = rcf.substitute( line )
-
-    Write the dictionary (with all variables expanded and included files included)
-    to new file:
-
-         rcf.write('newfile.rc')
-
-         
-USAGE AS PYTHON MODULE - BACKWARDS COMPATIBILITY
-
-    For backwards compatibility with older implementations of the rc.py module,
-    two extra routines are available.
-    
-    To read an rc-file by making an instance of the RcFile class,
-    and to return a dictionary of values only, use:
-            
-        rcdict = read( 'test.rc' [,silent=False] )
-        
-    Create a new rcfile and fill with key/values from a dictionary:
-
-        write( 'test.rc', rcdict )
-        
-
-HISTORY
-
-    2008? Andy Jacobson, NOAA
-      Translation to python of original shell script 'go_readrc' .
-    2009-06 Wouter Peters, WUR
-      Support substitution of previously defined variables.
-    2009-06 Arjo Segers, TNO
-      Support include files.
-    2009-09 Arjo Segers, TNO
-      Re-coded into class.
-      Implemented substitution loop.
-    2009-11 Arjo Segers, JRC
-      Added main program to run this file as a shell script.
-      Added replace and substitute routines.
-    2010-03 Arjo Segers, JRC
-      Support simple if-statements.
-      Support comment in values.
-    2010-07 Wouter Peters, WUR
-      Downgraded to work for python 2.4.3 too.
-      Added read/write routines for backwards compatibility.
-    2010-07-27 Arjo Segers, JRC
-      Maintain list with rcfile names and line numbers to be displayed
-      with error messages to identify where problematic lines are found.
-    2010-07-28 Andy Jacobson, NOAA
-      Add second dictionary of key,linetrace values to help track the 
-      provenance of #included keys (to debug multiple key instances).
-      Identify duplicate keys by checking on different source lines
-      instead of checking if the values are different.
-"""
-
-
-# ------------------------------------------------
-# classes
-# ------------------------------------------------
-
-
-class RcFile( object ) :
-
-    """
-    Class to store settings read from a rcfile.
-    """   
-
-    def __init__( self, filename, silent=False, marks=('${','}') ) :
-
-        """ 
-        
-        Usage:
-        
-          rcf = RcFile( 'settings.rc' [,silent=False] [marks=('${','}')] )
-
-        Read an rc-file and expand all the keys and values into a dictionary.
-        Do not shout messages if silent is set to True. 
-        The 2-item tuple (mark1,mark2) can be used to re-define the default
-        key pattern '${..}' into something else:
-          <mark1>...<mark2>
-
-        """
-
-        # external:
-        import re
-        import os
-        import sys
-        import logging
-        
-        # info ...
-        logging.debug( 'reading rcfile %s ...' % filename )
-
-        # check ...
-        if not os.path.exists(filename) :
-            msg = 'rcfile not found : %s' % filename ; logging.error(msg)
-            raise IOError, msg
-        #endif
-        
-        # store file name:
-        self.filename = filename
-        # store rc-file root directory:
-        self.rootdir = os.path.split(filename)[0]
-        
-        # storage for processed rcfile:
-        self.outfile = []
-        
-        # storage for key/value pairs:
-        self.values = {}
-
-        # storage for key/source file pairs:
-        self.sources = {}
-        
-        # open the specified rc-file:
-        f = open(filename,'r')
-        # store all lines in a list:
-        inpfile = f.readlines()
-        # close:
-        f.close()
-        
-        # create traceback info:
-        inptrace = []
-        for jline in range(len(inpfile)) :
-            inptrace.append( '"%s", line %-10s' % (filename,str(jline+1)) )
-        #endfor
-
-        # flags:
-        warned_for_eval = False
-        
-        # pass counter:
-        ipass = 1
-        
-        # loop until all substitutions and inclusions are done:
-        while True :
-        
-            # start again with empty output file:
-            self.outfile = []
-            # also empty traceback info:
-            self.trace = []
-            # init current line:
-            line = ''
-            # assume nothing has to be done after this loop:
-            something_done             = False
-            something_to_be_done       = False
-            # maintain list with unresolved lines (with keys that could not be evaluated yet):
-            unresolved_lines           = []
-            # maintain list with keys from which the value could not be resolved yet:
-            keys_with_unresolved_value = []
-            # maintain list with undefined keys; 
-            # some might be part of the keys_with_unresolved_value list:
-            undefined_keys             = []
-                        
-            # stack for conditional evaluation;
-            # each element is a tuple with elements:
-            #   resolved (boolean) true if the if-statement is evaluated
-            #   flag     (boolean) true if the lines below the statement 
-            #              are to be included
-            #   anyflag    (boolean) used to check if any of the 'if' or 'elif' conditions
-            #              in this sequence evaluated to True
-            #   line     (char) description of the line for messages
-            ifstack = []
-
-            #print ''
-            #print '---[pass %i]-------------------------------------' % ipass
-            #for line in inpfile : print line.strip()
-            
-            # loop over lines in input file:
-            iline = -1
-            for inpline in inpfile :
-            
-                # line counter:
-                iline = iline + 1
-                
-                # cut current traceback info from list:
-                linetrace_curr = inptrace.pop(0)
-                
-                # set full traceback info:
-                if line.endswith('\\') :
-                    # current input line is a continuation; combine:
-                    qfile,qlinenrs = linetrace.split(',')
-                    qnrs = qlinenrs.replace('lines','').replace('line','')
-                    if '-' in qnrs :
-                        qnr1,qnr2 = qnrs.split('-')
-                    else :
-                        qnr1,qnr2 = qnrs,qnrs
-                    #endif
-                    linetrace = '%s, lines %-9s' % ( qfile, '%i-%i' % (int(qnr1),int(qnr2)+1) )
-                else :
-                    # just copy:
-                    linetrace = linetrace_curr
-                #endif
-                    
-                # remove end-of-line character:
-                inpline = inpline.strip()
-                
-                ## DEBUG: display current line ...
-                #print '%4i | %s' % (iline,inpline)
-                #print '%4i | %s     %s' % (iline,inpline,linetrace)
-                
-                #
-                # empty lines
-                #
-
-                # skip empty lines:
-                if len(inpline) == 0 :
-                    # add empty line to output:
-                    self.outfile.append('\n')
-                    # add traceback info:
-                    self.trace.append( linetrace )
-                    # next will be a new line:
-                    line = ''
-                    # next input line:
-                    continue
-                #endif
-                
-                #
-                # comment lines
-                #
-
-                # skip comment:
-                if inpline.startswith('!') :
-                    # add copy to output file:
-                    self.outfile.append( '%s\n' % inpline )
-                    # add traceback info:
-                    self.trace.append( linetrace )
-                    # next will be a new line:
-                    line = ''
-                    # next input line:
-                    continue
-                #endif
-                
-                #
-                # continuation lines
-                #
-
-                # current line has continuation mark '\' at the end ?
-                # then add this input line:
-                if line.endswith('\\') :
-                    # remove continuation character, add input line:
-                    line = line[:-1]+inpline
-                else :
-                    # bright new line:
-                    line = inpline
-                #endif
-
-                # continuation mark ? then next line of input file:
-                if line.endswith('\\') : continue
-                
-                #
-                # conditional settings (1)
-                #
-
-                ## debug ...
-                #print 'xxx0 ', ifstack
-                
-                # is this the begin of a new condition ?
-                mark = '#if'
-                if line.startswith(mark) :
-                    # push temporary flag to stack, will be replaced after evaluation of condition:
-                    ifstack.append( ( False, True, False, linetrace ) )
-                    # debug ...
-                    #print 'xxx1 ', ifstack
-                #endif
-
-                mark = '#elif'
-                if line.startswith(mark) :
-                    # check ...
-                    if len(ifstack) == 0 :
-                        logging.error( 'found orphan "%s" in %s' % (mark,linetrace) )
-                        raise Exception
-                    #endif
-                    # remove current top from stack:
-                    resolved,flag,anyflag,msg = ifstack.pop()
-                    # did one of the previous #if/#elif evaluate to True already ?
-                    if resolved and anyflag :
-                        # push to stack that this line resolved to False :
-                        ifstack.append( ( True, False, anyflag, linetrace ) )
-                        # copy to output:
-                        self.outfile.append( '%s\n' % line )
-                        # add traceback info:
-                        self.trace.append( linetrace )
-                        # next input line:
-                        continue
-                    else :
-                        # push temporary flag to stack, will be replaced after evaluation of condition:
-                        ifstack.append( ( False, True, anyflag, linetrace ) )
-                    #endif
-                    ## debug ...
-                    #print 'xxx2 ', ifstack
-                #endif
-
-                mark = '#else'
-                if line.startswith(mark) :
-                    # check ...
-                    if len(ifstack) == 0 :
-                        logging.error( 'found orphan "%s" in %s' % (mark,linetrace) )
-                        raise Exception
-                    #endif
-                    # remove current top from stack:
-                    resolved,flag,anyflag,msg = ifstack.pop()
-                    # get higher level settings:
-                    if len(ifstack) > 0 :
-                        resolved_prev,flag_prev,anyflag_prev,msg_prev = ifstack[-1]
-                    else :
-                        flag_prev = True
-                    #endif
-                    # should next lines be included ? 
-                    # reverse flag, take into account higher level:
-                    new_flag = (not flag) and (not anyflag) and flag_prev
-                    # push to stack:
-                    ifstack.append( ( resolved, new_flag, False, linetrace ) )
-                    # debug ...
-                    #print 'xxx3 ', ifstack
-                    # copy to output:
-                    self.outfile.append( '%s\n' % line )
-                    # add traceback info:
-                    self.trace.append( linetrace )
-                    # next input line:
-                    continue
-                #endif
-
-                # is this the end of a condition ?
-                mark = '#endif'
-                if line.startswith(mark) :
-                    # check ...
-                    if len(ifstack) == 0 :
-                        logging.error( 'found orphan "%s" in %s' % (mark,linetrace) )
-                        raise Exception
-                    #endif
-                    # remove top from stack:
-                    top = ifstack.pop()
-                    # copy to output:
-                    self.outfile.append( '%s\n' % line )
-                    # add traceback info:
-                    self.trace.append( linetrace )
-                    # next input line:
-                    continue
-                #endif
-
-                # within if-statements ?
-                if len(ifstack) > 0 :
-                    # extract current top:
-                    resolved,flag,anyflag,msg = ifstack[-1]
-                    # already resolved ? then check if this line should be skipped:
-                    if resolved and (not flag) :
-                        # skip this line, but include commented version in output:
-                        self.outfile.append( '!%s\n' % line )
-                        # add traceback info:
-                        self.trace.append( linetrace )
-                        # read the next input line:
-                        continue
-                    #endif
-                #endif
-                
-                #
-                # handle '#eval' lines
-                #
-
-                mark = '#eval'        
-                if line.startswith(mark):
-                    # info ..
-                    if not warned_for_eval :
-                        if not silent: logging.warning( 'the #eval statements in rc-files are deprecated, use {key:value} pairs instead' )
-                        warned_for_eval = True
-                    #endif
-                    # add commented copy to output:
-                    self.outfile.append( '!evaluated>>> '+line )
-                    # add traceback info:
-                    self.trace.append( linetrace )
-                    # remove leading mark:
-                    line = line.lstrip(mark)
-                    # multiple settings are separated by ';' :
-                    evals = line.split(';')
-                    # insert in output file:
-                    for k in range(len(evals)) :
-                        # split in key and value:
-                        key,value = evals[k].split('=')
-                        # insert:
-                        self.outfile.append( '%s : %s' % (key,value) )
-                        # add traceback info:
-                        self.trace.append( linetrace )
-                    #endfor
-                    # next input line:
-                    continue
-                #endif
-
-                #
-                # replace ${..} values
-                #
-                
-                # ensure that common marks are evaluated correctly:
-                start_mark = marks[0].replace('{','\{').replace('<','\<').replace('$','\$')
-                close_mark = marks[1].replace('}','\}').replace('>','\>')
-        
-                # set syntax of keywords to be matched, e.g. '${...}' :
-                pattern = start_mark+'[A-Za-z0-9_.]+'+close_mark
-
-                # make a regular expression that matches all variables:
-                rc_varpat = re.compile( pattern )
-
-                # search all matching patterns:
-                pats = re.findall(rc_varpat,line)
-                # counter for unexpanded substitutions:
-                ntobedone = 0
-                # loop over matches:
-                for pat in pats :
-                    # remove enclosing characters:
-                    key = pat.lstrip(start_mark).rstrip(close_mark)
-                    # test some dictionaries for matching key:
-                    if self.values.has_key(key) :
-                        # get previously defined value:
-                        val = self.values[key]
-                        # substitute value:
-                        line = line.replace(pat,val)
-                        # set flag:
-                        something_done = True
-                    elif os.environ.has_key(key) :
-                        # get value from environment:
-                        val = os.environ[key]
-                        # substitute value:
-                        line = line.replace(pat,val)
-                        # set flag:
-                        something_done = True
-                    elif key == 'pid' :
-                        # special value: process id; convert to character:
-                        val = '%i' % os.getpid()
-                        # substitute value:
-                        line = line.replace(pat,val)
-                        # set flag:
-                        something_done = True
-                    elif key == 'script' :
-                        # special value: base name of the calling script, without extension:
-                        script,ext = os.path.splitext(os.path.basename(sys.argv[0]))
-                        # substitute value:
-                        line = line.replace(pat,script)
-                        # set flag:
-                        something_done = True
-                    else :
-                        # could not substitute yet; set flag:
-                        ntobedone = ntobedone + 1
-                        # add to list with unresolved keys:
-                        if key not in undefined_keys : undefined_keys.append(key)
-                        # continue with next substitution:
-                        continue
-                    #endif
-                #endfor  # matched patterns
-                # not all substituted ?
-                if ntobedone > 0 :
-                    # add line to output:
-                    self.outfile.append(line)
-                    # add traceback info:
-                    self.trace.append( linetrace )
-                    # a new pass is needed:
-                    something_to_be_done = True
-                    # store for info messages:
-                    unresolved_lines.append( '%s | %s' % (linetrace,line) )
-                    # could this be a 'key : value' line ?
-                    if ':' in line :
-                        # split, remove leading and end space:
-                        qkey,qvalue = line.split(':',1)
-                        qkey = qkey.strip()
-                        # assume it is indeed a key if it does not contain whitespace,
-                        # no start mark, and does not start with '#' :
-                        if (' ' not in qkey) and (start_mark not in qkey) and (not qkey.startswith('#')) :
-                            # add to list:
-                            if qkey not in keys_with_unresolved_value : keys_with_unresolved_value.append(qkey)
-                        #endif
-                    # next input line:
-                    continue
-                #endif
-
-                #
-                # handle '#include' lines
-                #
-
-                mark = '#include'
-                if line.startswith(mark) :
-                    # remove leading mark, what remains is file to be included:
-                    inc_file = line.lstrip(mark).strip()
-                    # check ...
-                    if not os.path.exists(inc_file) :
-                        inc_file = os.path.join(self.rootdir,inc_file)
-                        logging.debug( 'Added rootdir to requested include: %s' % inc_file )
-                    #endif
-                    if not os.path.exists(inc_file) :
-                        logging.error( 'include file not found : %s' % inc_file )
-                        logging.error( linetrace )
-                        raise IOError, 'include file not found : %s' % inc_file
-                    #endif
-                    # read content:
-                    inc_f = open( inc_file, 'r' )
-                    inc_rc = inc_f.readlines()
-                    inc_f.close()
-                    # add extra comment for output file:
-                    self.outfile.append( '! >>> %s >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n' % inc_file )
-                    self.outfile.extend( inc_rc )
-                    self.outfile.append( '! <<< %s <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n' % inc_file )
-                    # add traceback info:
-                    self.trace.append( linetrace )
-                    for jline in range(len(inc_rc)) :
-                        self.trace.append( '"%s", line %-10s' % (inc_file,str(jline+1)) )
-                    #endfor
-                    self.trace.append( linetrace )
-                    # set flag:
-                    something_done = True
-                    # a new pass is needed:
-                    something_to_be_done = True
-                    # next input line:
-                    continue
-                #endif
-
-
-                #
-                # conditional settings (2)
-                #
-
-                # evaluate conditional expressions:
-                mark1 = '#if'
-                mark2 = '#elif'
-                if line.startswith(mark1) or line.startswith(mark2) :
-                    # remove leading mark, what remains is the logical expression:
-                    if line.startswith(mark2) :
-                        expression = line[len(mark2):].strip()
-                    else :
-                        expression = line[len(mark1):].strip()
-                    #endif
-                    # common mistake is to add a ':' as in python; remove this:
-                    if expression.endswith(':') : expression = expression.rstrip(':').strip()
-                    # evaluate:
-                    try :
-                        flag = eval( expression )
-                    except :
-                        logging.error( 'could not evaluate expression:' )
-                        logging.error( '    %s' % expression )
-                        logging.error( 'in %s' % linetrace )
-                        raise Exception
-                    #endtry
-                    # remove temporary top added before during this pass:
-                    tmp_statement,tmp_flag,tmp_anyflag,tmp_msg = ifstack.pop()
-                    # extract current top if necessary:
-                    if len(ifstack) > 0 :
-                        dummy_statement,prev_flag,dummy_anyflag,dummy_msg = ifstack[-1]
-                    else :
-                        prev_flag = True
-                    #endif
-                    # should next lines be included ?
-                    new_flag = prev_flag and tmp_flag and flag
-                    # any if/elif evaluated to true in this sequence ?
-                    new_anyflag = tmp_anyflag or new_flag
-                    # add to stack, now resolved, take into account current flag:
-                    ifstack.append( ( True, new_flag, new_anyflag, linetrace ) )
-                    # debug ...
-                    #print 'xxx2 ', ifstack
-                    # copy to output:
-                    self.outfile.append( '%s\n' % line )
-                    # add traceback info:
-                    self.trace.append( linetrace )
-                    # next input line:
-                    continue
-                #endif
-
-                #
-                # error message
-                #
-                
-                # special command to raise an exception:
-                mark = '#error'
-                if line.startswith(mark) :
-                    # remove leading mark, what remains is error message:
-                    msg = line.lstrip(mark).strip()
-                    # display:
-                    logging.error( msg )
-                    # add info:
-                    logging.error( 'error message in %s' % linetrace )
-                    # stop:
-                    raise Exception
-                #endif
-
-                #
-                # checks
-                #
-
-                # common mistake ...
-                if line.startswith('#') :
-                    logging.error( 'line in rcfile starts with "#" but is not an "#include" or other special line;' )
-                    logging.error( 'if it is supposed to be comment, please start with "!" ...' )
-                    logging.error( '  %s' % line )
-                    logging.error( '%s' % linetrace )
-                    raise IOError
-                #endif
-
-                # check ...
-                if ':' not in line :
-                    logging.error( 'key/value line should contain a ":"' )
-                    logging.error( '%s' % linetrace )
-                    raise IOError
-                #endif
-
-                #
-                # add to output
-                #
-
-                # add line to output:
-                self.outfile.append( '%s\n' % line )
-                # add traceback info:
-                self.trace.append( linetrace )
-
-                #
-                # add key/value pair
-                #
-                
-                # not if inside an unresolved if-statement ...
-                if len(ifstack) > 0 :
-                    # get top values:
-                    resolved,flag,anyflag,msg = ifstack[-1]
-                    # not resolved yet ? then continue:
-                    if not resolved : continue
-                #endif
-                
-                # split in key and value; 
-                # value might contain ':' too, so at maximum 1 split:
-                key,val = line.split(':',1)
-                
-                # remove comment from value:
-                if '!' in val :
-                    # strip the comment, but not if the value contains an escaped '\!' ...
-                    if not '\!' in val : val,comment = val.split('!',1)
-                    # replace all escaped '\!' by a literal '!' :
-                    val = val.replace('\!','!')
-                #endif
-
-                # remove spaces:
-                key = key.strip()
-                val = val.strip()
-
-                # already defined ?
-                if self.values.has_key(key) :
-                    # this will occur often after the first pass since
-                    # the keys are resolved again and again ;
-                    # therefore, only complain if this definition is read
-                    # from a different line :
-                    if linetrace != self.sources[key] :
-                        logging.error( 'duplicated key \"%s\" found:' % key)
-                        logging.error( 'first definition in %s is:' % self.sources[key])
-                        logging.error( '  %s  : %s' % (key,str(self.values[key])) )
-                        logging.error( 'second definition in %s is:' % linetrace.strip() )
-                        logging.error( '  %s  : %s' % (key,str(val)) )
-                        raise Exception
-                    #endif
-                else :
-                    # store new value:
-                    self.values[key] = val
-                    self.sources[key] = linetrace
-                    # set flag:
-                    something_done = True
-                #endif
-
-                # display key and value ...
-                #print '                                --> %s : %s, from %s' % (key,val, linetrace)
-
-            #endfor  # loop over lines in text
-            
-            ## info ...
-            #print '~~~ outfile ~~~~~~~~~~~~~~~~~~~~~~~'
-            #for line in self.outfile : print line.strip()
-            #print '~~~ key/values ~~~~~~~~~~~~~~~~~~~~'
-            #for k,v in self.iteritems() :
-            #    print '%s  :  %s' % (k,v)
-            ##endfor
-            #print '-------------------------------------------------'
-            #print ''
-            
-            # check ...
-            if len(ifstack) > 0 :
-                logging.error( 'unterminated if-statement ; current stack:' )
-                for resolved,flag,anyflag,msg in ifstack : logging.error( msg )
-                logging.error( 'please fix the rcfile or debug this script ...' )
-                raise Exception
-            #endif
-
-            # check ...
-            if something_to_be_done :
-                # check for unterminated loop ...
-                if not something_done :
-                    # list all unresolved lines:
-                    logging.error( 'Could not resolve the following lines in rcfile(s):' )
-                    logging.error( '' )
-                    for uline in unresolved_lines :
-                        logging.error( '    %s' % uline )
-                    #endfor
-                    logging.error( '' )
-                    # list all undefined keys:
-                    logging.error( '  Undefined key(s):' )
-                    logging.error( '' )
-                    for ukey in undefined_keys :
-                        # do not list them if they are undefined because the value
-                        # depends on other undefined keys:
-                        if ukey not in keys_with_unresolved_value :
-                            # display:
-                            logging.error( '    %s' % ukey )
-                            # loop over unresolved lines to see in which the key is used:
-                            for uline in unresolved_lines :
-                                # search for  '${key}' pattern:
-                                if marks[0]+ukey+marks[1] in uline :
-                                    logging.error( '      %s' % uline )
-                                #endif
-                            #endfor
-                            logging.error( '' )
-                        #endif
-                    #endfor
-                    logging.error( 'please fix the rcfile(s) or debug this script ...' )
-                    raise Exception
-                #endif
-            else :
-                # finished ...
-                break
-            #endif
-            
-            # for safety ...
-            if ipass == 100 :
-                logging.error( 'resolving rc file has reached pass %i ; something wrong ?' % ipass )
-                raise Exception
-            #endif
-            
-            # new pass:
-            ipass = ipass + 1
-            # renew input:
-            inpfile = self.outfile
-            # renew traceback:
-            inptrace = self.trace
-            
-        #endwhile   # something to be done
-        
-    #enddef  # __init__
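-
-    # Illustrative rc-file fragment exercising the constructs handled above;
-    # the file name and keys below are hypothetical and only meant as a sketch:
-    #
-    #   ! comment line
-    #   my.project  : test
-    #   my.rundir   : /scratch/${my.project}/run
-    #   #include machine.rc
-    #   #if "${my.project}" == "test"
-    #   my.nproc    : 1
-    #   #else
-    #   my.nproc    : 8
-    #   #endif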
-    
-    
-    # ***
-    
-    
-    def has_key( self, key ) :
-    
-        # from dictionary:
-        return self.values.has_key(key)
-        
-    #enddef
-    
-    
-    # ***
-    
-    
-    def keys( self ) :
-    
-        # from dictionary:
-        return self.values.keys()
-        
-    #enddef
-    
-    
-    # ***
-
-
-    def get( self, key, totype='', default=None, verbose=False ) :
-    
-        """
-        rcf.get( 'my.value' [,default=None] )
-        Return element 'key' from the dictionary.
-        If the element is not present but a default is specified, then return
-        the default value.
-        If 'verbose' is set to True, debug messages about which value is
-        returned for the given key are written to the logging.
-        The optional argument 'totype' defines the conversion to a Python type.
-        If 'totype' is set to 'bool', the return value is the
-        boolean True for values 'T', 'True', 'yes', and '1',
-        and False for 'F', 'False', 'no', or '0' ;
-        for other values, an exception will be raised.
-        """
-        
-        # external:
-        import logging
-        
-        # element found ?
-        if self.values.has_key(key) :
-            # copy value:
-            value = self.values[key]
-            # convert ?
-            if totype == 'bool' :
-                # convert to boolean:
-                if value in ['T','True','yes','1'] :
-                    value = True
-                elif value in ['F','False','no','0'] :
-                    value = False
-                else :
-                    logging.error( "value of key '%s' is not a boolean : %s" % (key,str(value)) )
-                    raise Exception
-                #endif
-            elif len(totype) > 0 :
-                # convert to other type ...
-                value = eval( '%s(%s)' % (totype,value) )
-            #endif
-            # for debugging ...
-            if verbose : logging.debug( 'rc setting "%s" : "%s"' % (key,str(value)) )
-        else :
-            # default value specified ?
-            if default != None :
-                # copy default:
-                value = default
-                # for debugging ...
-                if verbose : logging.debug( 'rc setting "%s" : "%s" (default)' % (key,str(value)) )
-            else :
-                # something wrong ...
-                logging.error( "key '%s' not found in '%s' and no default specified" % (key,self.filename) )
-                raise Exception
-            #endif
-        #endif
-        
-        # ok
-        return value
-        
-    #enddef
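-
-    # Illustrative calls of 'get'; the rcfile name and keys below are hypothetical:
-    #
-    #   rcf   = RcFile( 'settings.rc' )
-    #   nlag  = rcf.get( 'time.nlag', totype='int' )
-    #   debug = rcf.get( 'log.debug', totype='bool', default=False )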
-    
-    
-    # ***
-    
-    
-    def replace( self, key, val ) :
-    
-        """
-        Replace a key by a new value.
-        """
-        
-        # external:
-        import logging
-        
-        # search for a line '<key>   : <val>' 
-        # loop over lines in output file:
-        found = False
-        for iline in range(len(self.outfile)) :
-            # extract:
-            line = self.outfile[iline]
-            # skip lines that are certainly not a key:value pair ...
-            if ':' not in line : continue
-            # split once at first ':'
-            k,v = line.split(':',1)
-            # match ?
-            if k.strip() == key :
-                # replace line in original file:
-                self.outfile[iline] = '%s : %s\n' % (k,str(val))
-                # replace value:
-                self.values[key] = val
-                # set flag:
-                found = True
-                # found, thus no need to continue:
-                break
-            #endif
-        #endfor  # lines
-        # not found ?
-        if not found :
-            logging.error( 'could not replace key : %s' % key )
-            raise Exception
-        #endif
-        
-        # ok
-        return
-    
-    #enddef
-    
-    
-    # ***
-    
-    
-    def add( self, key, val, comment='' ) :
-    
-        """Add a new key/value pair."""
-        
-        # add lines:
-        self.outfile.append( '\n' )
-        if len(comment) > 0 : self.outfile.append( '! %s\n' % comment )
-        self.outfile.append( '%s : %s\n' % (key,str(val)) )
-
-        # add to dictionary:
-        self.values[key] = val
-        
-        # ok
-        return
-    
-    #enddef
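-
-    # Illustrative use of 'replace' and 'add'; the keys and values are hypothetical:
-    #
-    #   rcf.replace( 'time.start', '2005-01-01 00:00' )
-    #   rcf.add( 'time.finished', 'F', comment='set by the run script' )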
-    
-    
-    # ***
-    
-    
-    def substitute( self, line, marks=('${','}') ) :
-    
-        """
-        Return a line with all '${..}' parts replaced by the corresponding rcfile values.
-        The 2-item tuple (mark1,mark2) can be used to re-define the default
-        key pattern '${..}' into something else:
-          <mark1>...<mark2>
-        """
-        
-        # external:
-        import re
-        
-        # ensure that common marks are evaluated correctly:
-        start_mark = marks[0].replace('{','\{').replace('<','\<').replace('$','\$')
-        close_mark = marks[1].replace('}','\}').replace('>','\>')
-
-        # set syntax of keywords to be matched, e.g. '${...}' :
-        pattern = start_mark+'[A-Za-z0-9_.]+'+close_mark
-
-        # make a regular expression that matches all variables:
-        rc_varpat = re.compile( pattern )
-
-        # search all matching patterns:
-        pats = re.findall(rc_varpat,line)
-        # loop over matches:
-        for pat in pats :
-            # remove enclosing characters:
-            key = pat.lstrip(start_mark).rstrip(close_mark)
-            # test dictionary for matching key:
-            if self.values.has_key(key) :
-                # get previously defined value:
-                val = self.values[key]
-                # substitute value:
-                line = line.replace(pat,val)
-            #endif
-        #endfor  # matched patterns
-
-        # ok
-        return line
-        
-    #enddef
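-
-    # Illustrative substitution calls, assuming keys 'run.dir' and 'station'
-    # (both hypothetical) are defined in the rcfile:
-    #
-    #   line = rcf.substitute( 'output to ${run.dir}/out' )
-    #   path = rcf.substitute( '<station>.txt', marks=('<','>') )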
-    
-
-    # ***
-
-
-    def WriteFile( self, filename ) :
-
-        """ write the dictionary to file"""
-
-        # open file for writing:
-        f = open(filename,'w')
-
-        ## loop over key/value pairs:
-        #for k,v in self.iteritems():
-        #    # add line; at least the specified number of characters 
-        #    # is used for the key:
-        #    f.write( '%-20s:%s\n' % (k,v) )
-        ##endfor
-
-        # write processed input:
-        f.writelines( self.outfile )
-        
-        # close file:
-        f.close()
-        
-    #endif
-    
-
-#endclass    # RcFile
-
-
-# ***
-
-
-def read( rcfilename, silent=False ) :
-
-    """ 
-    This function reads an rc-file by making an instance of the RcFile class,
-    and then returns the dictionary of values only. 
-    This makes it backwards compatible with older implementations of the rc.py module
-    """
-
-    rcdict = RcFile( rcfilename, silent=silent )
-
-    return rcdict.values
-
-#enddef
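-
-# Illustrative use of the backwards-compatible 'read' interface;
-# the file and key names are hypothetical:
-#
-#   rcdict = read( 'settings.rc' )
-#   print rcdict['time.start']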
-
-
-# ***
-
-
-def write( filename, rcdict ) :
-
-    """
-    This function writes an rc-file dictionary.
-    This makes it backwards compatible with older implementations of the rc.py module
-    """
-
-    # open file for writing:
-    f = open(filename,'w')
-
-    # loop over key/value pairs:
-    for k,v in rcdict.items():
-        # add line; at least the specified number of characters 
-        # is used for the key:
-        f.write( '%-20s:%s\n' % (k,v) )
-    #endfor
-
-    # close file:
-    f.close()
-
-#enddef
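-
-# Illustrative use of 'write'; the file name is hypothetical:
-#
-#   write( 'settings_out.rc', rcdict )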
-
-
-
-# ------------------------------------------------
-# script
-# ------------------------------------------------
-
-
-if __name__ == '__main__':
-
-    # external ...
-    import sys
-    import optparse
-    import logging
-    import traceback
-    
-    # extract arguments from sys.argv array:
-    #   0 = name of calling script, 1: = actual arguments
-    args = sys.argv[1:]
-    
-    # set text for 'usage' help line:
-    usage = "\n    %prog <rcfile> <key> [-b|--bool] [--default<=value>]\n    %prog <rcfile> -w|--write\n    %prog -h|--help\n    %prog -d|--doc"
-
-    # initialise the option parser:
-    parser = optparse.OptionParser(usage=usage)
-    
-    # define options:
-    parser.add_option( "-d", "--doc", 
-                         help="print documentation",
-                         dest="doc", action="store_true", default=False )
-    parser.add_option( "-v", "--verbose", 
-                         help="print information messages",
-                         dest="verbose", action="store_true", default=False )
-    parser.add_option( "-b", "--bool", 
-                         help="return 'True' for values 'T', 'True', 'yes', or '1', and 'False' for 'F', 'False', 'no', or '0'",
-                         dest="boolean", action="store_true", default=False )
-    parser.add_option( "--default", 
-                         help="default value returned if key is not found",
-                         dest="default", action="store" )
-    parser.add_option( "-w", "--write", 
-                         help="write pre-processed rcfile",
-                         dest="write", action="store_true", default=False )
-    
-    # now parse the actual arguments:
-    opts,args = parser.parse_args( args=args )
-    
-    # print documentation ?
-    if opts.doc :
-        print __doc__
-        sys.exit(0)
-    #endif
-    
-    # rcfile argument should be provided:
-    if len(args) < 1 :
-        parser.error("no name of rcfile provided\n")
-    #endif
-    # extract:
-    rcfile = args[0]
-    
-    # read rcfile in dictionary:
-    try :
-        rcf = RcFile( rcfile, silent=(not opts.verbose) )
-    except :
-        if opts.verbose : logging.error( traceback.format_exc() )
-        sys.exit(1)
-    #endtry
-    
-    # print pre-processed file ?
-    if opts.write :
-        for line in rcf.outfile : print line.strip()
-        sys.exit(0)
-    #endif
-
-    # key argument should be provided:
-    if len(args) < 2 :
-        parser.error("no name of rckey provided\n")
-    #endif
-    # extract:
-    rckey  = args[1]
-    
-    # key present ?
-    if rcf.has_key(rckey) :
-
-        # print requested value:
-        if opts.boolean :
-            # extract value:
-            flag = rcf.get(rckey,'bool')
-            # print result:
-            if flag :
-                print 'True'
-            else :
-                print 'False'
-            #endif
-        else :
-            # extract value:
-            value = rcf.get(rckey)
-            # display:
-            print value
-        #endif
-        
-    else :
-
-        # default value provided ?
-        if opts.default != None :
-            # display:
-            print opts.default
-        else :
-            print 'ERROR - key "%s" not found in rcfile "%s" and no default specified' % (rckey,rcfile)
-            sys.exit(1)
-        #endif
-
-    #endif
-    
-#endif
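-
-# Illustrative command-line usage; the rcfile and key names are hypothetical:
-#
-#   python rc.py settings.rc time.start
-#   python rc.py settings.rc da.resume --bool
-#   python rc.py settings.rc --write > settings_expanded.rc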
-
-
-# ------------------------------------------------
-# end
-# ------------------------------------------------
diff --git a/da/tools/standardvariables.py b/da/tools/standardvariables.py
deleted file mode 100755
index 36f5fbce0fc3c3c69b5ffe6fb443862284206933..0000000000000000000000000000000000000000
--- a/da/tools/standardvariables.py
+++ /dev/null
@@ -1,224 +0,0 @@
-standard_variables = { 'bio_flux_prior' : {'name'        : 'bio_flux_prior',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, terrestrial vegetation, not optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'bio_flux_opt' : {'name'          : 'bio_flux_opt',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, terrestrial biosphere , optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ocn_flux_prior' : {'name'        : 'ocn_flux_prior',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, open ocean , not optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ocn_flux_opt' : {'name'          : 'ocn_flux_opt',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, open ocean , optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'fossil_flux_imp' : {'name'       : 'fossil_flux_imp',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, fossil fuel burning , imposed ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'fire_flux_imp' : {'name'         : 'fire_flux_imp',\
-                                         'units'         : 'mol m-2 s-1' ,\
-                                         'long_name'     : 'Surface flux of carbon dioxide, biomass burning , imposed ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'surface_carbon_dioxide_mole_flux', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'bio_flux_prior_cov' : {'name'    : 'bio_flux_prior_cov',\
-                                         'units'         : '[mol region-1 s-1]^2' ,\
-                                         'long_name'     : 'Covariance of surface flux of carbon dioxide, terrestrial vegetation , not optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : '', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'bio_flux_opt_cov' : {'name'      : 'bio_flux_opt_cov',\
-                                         'units'         : '[mol region-1 s-1]^2' ,\
-                                         'long_name'     : 'Covariance of surface flux of carbon dioxide, terrestrial vegetation , optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : '', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ocn_flux_prior_cov' : {'name'    : 'ocn_flux_prior_cov',\
-                                         'units'         : '[mol region-1 s-1]^2' ,\
-                                         'long_name'     : 'Covariance of surface flux of carbon dioxide, open ocean , not optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : '', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ocn_flux_opt_cov' : {'name'      : 'ocn_flux_opt_cov',\
-                                         'units'         : '[mol region-1 s-1]^2' ,\
-                                         'long_name'     : 'Covariance of surface flux of carbon dioxide, open ocean , optimized ', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : '', \
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'decimal_date' :  {'name'         : 'decimal_date',\
-                                         'units'         : 'years' ,\
-                                         'long_name'     : 'dates and times', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'date', \
-                                         'dims'          : (), \
-                                         'dtype'         : 'double', \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'date' :         {'name'          : 'date',\
-                                         'units'         : 'days since 2000-01-01 00:00:00 UTC' ,\
-                                         'long_name'     : 'UTC dates and times', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'standard_name' : 'date', \
-                                         'dims'          : (), \
-                                         'dtype'         : 'double', \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'idate' :        {'name'          : 'idate',\
-                                         'units'         : 'yyyy MM dd hh mm ss ' ,\
-                                         'long_name'     : 'integer components of date and time', \
-                                         'standard_name' : 'calendar_components', \
-                                         'comment'       : 'time-interval average, centered on times in the date axis', \
-                                         'dims'          : (), \
-                                         'dtype'         : 'int', \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'latitude' :     {'name'          : 'latitude',\
-                                         'units'         : 'degrees_north ' ,\
-                                         'long_name'     : 'latitude', \
-                                         'standard_name' : 'latitude', \
-                                         'comment'       : 'center of interval',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'longitude' :     {'name'         : 'longitude',\
-                                         'units'         : 'degrees_east ' ,\
-                                         'long_name'     : 'longitude', \
-                                         'standard_name' : 'longitude', \
-                                         'comment'       : 'center of interval',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'height' :        {'name'         : 'height',\
-                                         'units'         : 'masl ' ,\
-                                         'long_name'     : 'height_above_ground_level', \
-                                         'standard_name' : 'height_above_ground_level', \
-                                         'comment'       : 'value is meters above sea level',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'co2' :           {'name'         : 'co2',\
-                                         'units'         : 'micromol mol-1 ' ,\
-                                         'long_name'     : 'mole_fraction_of_carbon_dioxide_in_air', \
-                                         'standard_name' : 'mole_fraction_of_carbon_dioxide_in_air', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'meanstate' :     {'name'         : 'statevectormean',\
-                                         'units'         : 'unitless' ,\
-                                         'long_name'     : 'mean_value_of_state_vector', \
-                                         'standard_name' : 'mean_value_of_state_vector', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ensemblestate':  {'name'         : 'statevectorensemble',\
-                                         'units'         : 'unitless' ,\
-                                         'long_name'     : 'ensemble_value_of_state_vector', \
-                                         'standard_name' : 'ensemble_value_of_state_vector', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'meanstate_prior' : {'name'       : 'statevectormean_prior',\
-                                         'units'         : 'unitless' ,\
-                                         'long_name'     : 'mean_value_of_state_vector_prior', \
-                                         'standard_name' : 'mean_value_of_state_vector_prior', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ensemblestate_prior':  {'name'         : 'statevectorensemble_prior',\
-                                         'units'         : 'unitless' ,\
-                                         'long_name'     : 'ensemble_value_of_state_vector_prior', \
-                                         'standard_name' : 'ensemble_value_of_state_vector_prior', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'meanstate_opt' : {'name'       : 'statevectormean_opt',\
-                                         'units'         : 'unitless' ,\
-                                         'long_name'     : 'mean_value_of_state_vector_optimized', \
-                                         'standard_name' : 'mean_value_of_state_vector_opt', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'ensemblestate_opt':  {'name'         : 'statevectorensemble_opt',\
-                                         'units'         : 'unitless' ,\
-                                         'long_name'     : 'ensemble_value_of_state_vector_optimized', \
-                                         'standard_name' : 'ensemble_value_of_state_vector_opt', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                       'unknown' :      {'name'          : '',\
-                                         'units'         : '' ,\
-                                         'long_name'     : '', \
-                                         'standard_name' : '', \
-                                         'comment'       : '',\
-                                         'dims'          : (), \
-                                         'values'        : [], \
-                                         'count'         : 0 \
-                                        } , \
-                     }
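-
-# Illustrative lookup in the table above; 'bio_flux_opt' and 'unknown' are keys
-# defined here, how the table is consumed elsewhere is not shown:
-#
-#   attrs = standard_variables.get('bio_flux_opt', standard_variables['unknown'])
-#   print attrs['units'], attrs['long_name']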
-
-
-
-