Commit 89844dce by Peters, Wouter

Added new aggregation routines and expanded existing ones. The routines and their changes are:
    1) 1x1 fluxes
    
Cycle fluxes are now written in cycle-by-cycle files instead of all into one file, which makes file handling easier. Each file now also contains the full ensemble of 1x1 fluxes, resulting in much more data. The advantage is that this data can be post-aggregated to any mapping needed, including mapping of the full covariance through the ensemble (see the sketch after this list).
    
    2) State fluxes
    
    State fluxes are also written per cycle for the same reasons.
    
    3) Olson fluxes
    
Mean fluxes as well as the ensemble are aggregated to 240 regions (11*19 Olson ecosystem regions and 30 ocean regions).
    
    4) TransCom fluxes
    
Mean fluxes as well as the ensemble are aggregated to 23 regions. This is similar to the dedicated routine that makes TransCom fluxes. The disadvantage of the dedicated routine is that it uses a statevector-to-transcom mapping which must be part of the StateVector, whereas the new routine does not need this. From the ensemble, aggregated fluxes and the TransCom covariance can be constructed.
    
    5) Country fluxes
    
Mean and ensemble fluxes are written for a number of large countries (USA, China, Russia, EU27, Brazil, ...). This routine can in principle handle all countries of the world, but small countries get unreliable fluxes because of aggregation errors and the uncertainty of the parameters on smaller scales.
    
For the aggregation, several dictionaries are used to map from the grid to the aggregate regions. These are pickled to make the routine faster, and are also provided through SVN.
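
Since the full ensemble is written out, any linear aggregation and its covariance can be reconstructed afterwards. Below is a minimal sketch of such a post-aggregation, assuming the statefluxes.nc layout written by SaveWeeklyAvgStateData (the netCDF4 module and the aggregation weights w are illustrative, not part of CTDAS):

    import numpy as np
    import netCDF4

    f = netCDF4.Dataset('statefluxes.nc')
    # (nmembers, nparameters) flux deviations for one cycle; per the in-file
    # comment these form the matrix square root of the covariance
    M = np.array(f.variables['bio_flux_prior_ensemble'][0])
    f.close()

    nmembers = M.shape[0]
    # full covariance in parameter space, same formula as in SaveWeeklyAvgTCData
    cov = np.dot(M.transpose(), M) / (nmembers - 1)

    # any linear mapping w (e.g. regions-to-TransCom) carries through as w C w^T
    w = np.ones((1, M.shape[1]))                  # illustrative: global total
    aggregated_var = np.dot(np.dot(w, cov), w.transpose())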
    
    
expand_fluxes.py
#!/usr/bin/env python
# expand_fluxes.py
import sys
sys.path.append('../../')
import os
import getopt
from datetime import datetime, timedelta
from da.tools.general import CreateDirs
import numpy as np
from pylab import date2num, num2date

"""
Author: Wouter Peters (Wouter.Peters@noaa.gov)

Revision History:
File created on 21 October 2008.

"""

def proceed_dialog(txt, yes=['y','yes'], all=['a','all', 'yes-to-all']):
    """ function to ask whether to proceed or not """
    response=raw_input(txt)
    if response.lower() in yes:
       return 1
    if response.lower() in all:
       return 2
    return 0

def SaveWeeklyAvg1x1Data(DaCycle, StateVector):
    """
        Function creates a NetCDF file with output on a 1x1 degree grid. It uses the flux data written by the 
        :class:`~da.baseclasses.obsoperator.ObsOperator.py`, and multiplies these with the mapped parameters and
        variance (not covariance!) from the :class:`~da.baseclasses.statevector.StateVector`.
        
           :param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
           :param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
           :rtype: None
    """

    import da.tools.io4 as io
    import logging
    from da.analysis.tools_regions import globarea
#
    dirname     = 'data_flux1x1_weekly'
#
    dirname     = CreateDirs(os.path.join(DaCycle['dir.analysis'],dirname))
#
# Some help variables
#
    dectime0    = date2num(datetime(2000,1,1))
    dt          = DaCycle['cyclelength']
    startdate   = DaCycle['time.start'] 
    enddate     = DaCycle['time.end'] 
    nlag        = StateVector.nlag

    msg = "DA Cycle start date is %s"   % startdate.strftime('%Y-%m-%d %H:%M')      ; logging.debug(msg)
    msg = "DA Cycle end   date is %s"   % enddate.strftime('%Y-%m-%d %H:%M')        ; logging.debug(msg)

#
# Create or open NetCDF output file
#
    saveas      = os.path.join(dirname,'flux_1x1.%s.nc'% startdate.strftime('%Y-%m-%d') )
    ncf         = io.CT_CDF(saveas,'write')

#
# Create dimensions and lat/lon grid
#
    dimgrid     = ncf.AddLatLonDim()
    dimensemble = ncf.AddDim('members',StateVector.nmembers)
    dimdate     = ncf.AddDateDim()
#
# set title and tell GMT that we are using "pixel registration"
#
    setattr(ncf,'Title','CarbonTracker fluxes')
    setattr(ncf,'node_offset',1)
#
# skip dataset if already in file
#
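# (the date coordinate counts decimal days since 2000-01-01 and marks the
#  midpoint of the cycle, hence the + dt.days/2.0)
#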
    ncfdate = date2num(startdate)-dectime0+dt.days/2.0
    skip    = ncf.has_date(ncfdate)
    if skip:
        msg = 'Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'),saveas) ; logging.warning(msg)
    else:
        
#
# if not, process this cycle. Start by getting flux input data from CTDAS
#
        filename = os.path.join(DaCycle['dir.output'],'flux1x1_%s_%s.nc'%(startdate.strftime('%Y%m%d%H'),enddate.strftime('%Y%m%d%H'),) )

        file                = io.CT_Read(filename,'read')
        bio                 = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.bio.flux']))
        ocean               = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.ocean.flux']))
        fire                = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.fires.flux']))
        fossil              = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.fossil.flux']))
        #mapped_parameters   = np.array(file.GetVariable(DaCycle.DaSystem['final.param.mean.1x1']))
        file.close()

        next=ncf.inq_unlimlen()[0]


# Start adding datasets from here on, both prior and posterior datasets for bio and ocn

        for prior in [True, False]:
#
# Now fill the StateVector with the prior values for this time step. Note that the prior value for this time step
# occurred nlag time steps ago, so we make a shift in the output directory, but only if we are more than nlag cycles away from the start date.
#

            if prior:
                qual_short='prior'
                for n in range(nlag,0,-1):
                    priordate       = enddate - timedelta(dt.days*n)
                    savedir         = DaCycle['dir.output'].replace(startdate.strftime('%Y%m%d'),priordate.strftime('%Y%m%d') )
                    filename        = os.path.join(savedir,'savestate.nc')
                    if os.path.exists(filename):
                        dummy                   = StateVector.ReadFromFile(filename,qual=qual_short)
                        gridmean,gridensemble   = StateVector.StateToGrid(lag=n)

# Replace the mean statevector by all ones (assumed priors)

                        gridmean                = StateVector.VectorToGrid(vectordata = np.ones(StateVector.nparams,))

                        msg = 'Read prior dataset from file %s, sds %d: '%(filename,n) ; logging.debug(msg)
                        break
            else:
                qual_short='opt'
                savedir                 = DaCycle['dir.output']
                filename                = os.path.join(savedir,'savestate.nc')
                dummy                   = StateVector.ReadFromFile(filename,qual=qual_short)
                gridmean,gridensemble   = StateVector.StateToGrid(lag=1)

                msg = 'Read posterior dataset from file %s, sds %d: '%(filename,1) ; logging.debug(msg)
#
# Multiply fluxes with the mapped parameters; for the prior the mean was replaced by ones above, so prior mean fluxes stay unscaled
#
            biomapped=bio*gridmean 
            oceanmapped=ocean*gridmean 
            biovarmapped=bio*gridensemble
            oceanvarmapped=ocean*gridensemble

#
#
#  For each dataset, get the standard definitions from the module mysettings, add values, dimensions, and unlimited count, then write
#
            savedict=ncf.StandardVar(varname='bio_flux_'+qual_short)
            savedict['values']=biomapped.tolist()
            savedict['dims']=dimdate+dimgrid
            savedict['count']=next
            ncf.AddData(savedict)
#
            savedict=ncf.StandardVar(varname='ocn_flux_'+qual_short)
            savedict['values']=oceanmapped.tolist()
            savedict['dims']=dimdate+dimgrid
            savedict['count']=next
            ncf.AddData(savedict)

            savedict=ncf.StandardVar(varname='bio_flux_%s_ensemble'%qual_short)
            savedict['values']=biovarmapped.tolist()
            savedict['dims']=dimdate+dimensemble+dimgrid
            savedict['count']=next
            ncf.AddData(savedict)
#
            savedict=ncf.StandardVar(varname='ocn_flux_%s_ensemble'%qual_short)
            savedict['values']=oceanvarmapped.tolist()
            savedict['dims']=dimdate+dimensemble+dimgrid
            savedict['count']=next
            ncf.AddData(savedict)

        # End prior/posterior block

        savedict=ncf.StandardVar(varname='fire_flux_imp')
        savedict['values']=fire.tolist()
        savedict['dims']=dimdate+dimgrid
        savedict['count']=next
        ncf.AddData(savedict)
#
        savedict=ncf.StandardVar(varname='fossil_flux_imp')
        savedict['values']=fossil.tolist()
        savedict['dims']=dimdate+dimgrid
        savedict['count']=next
        ncf.AddData(savedict)

        area=globarea()
        savedict=ncf.StandardVar(varname='cell_area')
        savedict['values']=area.tolist()
        savedict['dims']=dimgrid
        ncf.AddData(savedict)
#
        savedict=ncf.StandardVar(varname='date')
        savedict['values']=date2num(startdate)-dectime0+dt.days/2.0
        savedict['dims']=dimdate
        savedict['count']=next
        ncf.AddData(savedict)

        sys.stdout.write('.')
        sys.stdout.flush()
#
#   Done, close the new NetCDF file
#
    ncf.close()
#
#   Return the full name of the NetCDF file so it can be processed by the next routine
#
    msg="Gridded weekly average fluxes now written" ; logging.info(msg)

    return saveas

def SaveWeeklyAvgStateData(DaCycle, StateVector):
    """
        Function creates a NetCDF file with output for all parameters. It uses the flux data written by the 
        :class:`~da.baseclasses.obsoperator.ObsOperator.py`, and multiplies these with the mapped parameters and
        variance (not covariance!) from the :class:`~da.baseclasses.statevector.StateVector`.
        
           :param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
           :param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
           :rtype: None
    """

    import da.tools.io4 as io
    import logging
    from da.analysis.tools_regions import globarea
    
#
    dirname     = 'data_state_weekly'
#
    dirname     = CreateDirs(os.path.join(DaCycle['dir.analysis'],dirname))
#
# Some help variables
#
    dectime0    = date2num(datetime(2000,1,1))
    dt          = DaCycle['cyclelength']
    startdate   = DaCycle['time.start'] 
    enddate     = DaCycle['time.end'] 
    nlag        = StateVector.nlag

    area        = globarea()
    vectorarea  = StateVector.GridToVector(griddata=area,method='sum')
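    # vectorarea: total surface area of each statevector region, from summing
    # the 1x1 grid-cell areas that map into it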

    msg = "DA Cycle start date is %s"   % startdate.strftime('%Y-%m-%d %H:%M')      ; logging.debug(msg)
    msg = "DA Cycle end   date is %s"   % enddate.strftime('%Y-%m-%d %H:%M')        ; logging.debug(msg)

#
# Create or open NetCDF output file
#
    saveas      = os.path.join(dirname,'statefluxes.nc')
    ncf         = io.CT_CDF(saveas,'write')

#
# Create dimensions and lat/lon grid
#
    dimregs     = ncf.AddDim('nparameters',StateVector.nparams)
    dimmembers  = ncf.AddDim('nmembers',StateVector.nmembers)
    dimdate     = ncf.AddDateDim()
#
# set title and tell GMT that we are using "pixel registration"
#
    setattr(ncf,'Title','CarbonTracker fluxes')
    setattr(ncf,'node_offset',1)
#
# skip dataset if already in file
#
    ncfdate = date2num(startdate)-dectime0+dt.days/2.0
    skip    = ncf.has_date(ncfdate)
    if skip:
        msg = 'Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'),saveas) ; logging.warning(msg)
    else:

        next=ncf.inq_unlimlen()[0]

#
# if not, process this cycle. Start by getting flux input data from CTDAS
#
        filename = os.path.join(DaCycle['dir.output'],'flux1x1_%s_%s.nc'%(startdate.strftime('%Y%m%d%H'),enddate.strftime('%Y%m%d%H'),) )

        file                = io.CT_Read(filename,'read')
        bio                 = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.bio.flux']))
        ocean               = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.ocean.flux']))
        fire                = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.fires.flux']))
        fossil              = np.array(file.GetVariable(DaCycle.DaSystem['background.co2.fossil.flux']))
        #mapped_parameters   = np.array(file.GetVariable(DaCycle.DaSystem['final.param.mean.1x1']))
        file.close()

        next=ncf.inq_unlimlen()[0]

        vectorbio           = StateVector.GridToVector(griddata=bio*area,method='sum')
        vectorocn           = StateVector.GridToVector(griddata=ocean*area,method='sum')
        vectorfire          = StateVector.GridToVector(griddata=fire*area,method='sum')
        vectorfossil        = StateVector.GridToVector(griddata=fossil*area,method='sum')


# Start adding datasets from here on, both prior and posterior datasets for bio and ocn

        for prior in [True, False]:
#
# Now fill the StateVector with the prior values for this time step. Note that the prior value for this time step
# occurred nlag time steps ago, so we make a shift in the output directory, but only if we are more than nlag cycles away from the start date.
#

            if prior:
                qual_short='prior'
                for n in range(nlag,0,-1):
                    priordate       = enddate - timedelta(dt.days*n)
                    savedir         = DaCycle['dir.output'].replace(startdate.strftime('%Y%m%d'),priordate.strftime('%Y%m%d') )
                    filename        = os.path.join(savedir,'savestate.nc')
                    if os.path.exists(filename):
                        dummy                   = StateVector.ReadFromFile(filename,qual=qual_short)

# Replace the mean statevector by all ones (assumed priors)

                        statemean               = np.ones((StateVector.nparams,))

                        choicelag               = n

                        msg = 'Read prior dataset from file %s, lag %d: '%(filename, choicelag) ; logging.debug(msg)
                        break
            else:
                qual_short='opt'
                savedir                 = DaCycle['dir.output']
                filename                = os.path.join(savedir,'savestate.nc')
                dummy                   = StateVector.ReadFromFile(filename)
                choicelag               = 1
                statemean               = StateVector.EnsembleMembers[choicelag-1][0].ParameterValues

                msg = 'Read posterior dataset from file %s, lag %d: '%(filename,choicelag) ; logging.debug(msg)
#
# Multiply fluxes with the statevector parameters; for the prior the mean is all ones, so prior mean fluxes stay unscaled
#
            data                = statemean*vectorbio # units of mole region-1 s-1

            savedict            = ncf.StandardVar(varname='bio_flux_%s'%qual_short)
            savedict['values']  = data
            savedict['dims']    = dimdate+dimregs
            savedict['count']   = next
            ncf.AddData(savedict)

#
# Here comes a special provision for the posterior flux covariances: these are calculated relative to the prior flux covariance to
# ensure they are indeed smaller due to the data assimilation. If they would be calculated relative to the mean posterior flux, the 
# uncertainties would shift just because the mean flux had increased or decreased, which is not what we want.
#
# The implementation is done by multiplying the ensemble with the vectorbio only, and not with the statemean values
# which are assumed 1.0 in the prior always.
#

            members             = StateVector.EnsembleMembers[choicelag-1] 
            deviations          = np.array([mem.ParameterValues*vectorbio for mem in members])
            deviations          = deviations - deviations[0,:]
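            # member 0 carries the ensemble mean (cf. statemean above), so
            # subtracting its flux leaves deviations around the mean flux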

            savedict=ncf.StandardVar(varname='bio_flux_%s_ensemble'%qual_short)

            savedict['values']=deviations.tolist()
            savedict['dims']=dimdate+dimmembers+dimregs
            savedict['comment']="This is the matrix square root, use (M x M^T)/(nmembers-1) to make covariance"
            savedict['units']="mol region-1 s-1"
            savedict['count']=next
            ncf.AddData(savedict)

            savedict=ncf.StandardVar('unknown')
            savedict['name'] = 'bio_flux_%s_std'%qual_short
            savedict['long_name'] = 'Biosphere flux standard deviation, %s'%qual_short
            savedict['values']=deviations.std(axis=0)
            savedict['dims']=dimdate+dimregs
            savedict['comment']="This is the standard deviation on each parameter"
            savedict['units']="mol region-1 s-1"
            savedict['count']=next
            ncf.AddData(savedict)

            data                = statemean*vectorocn # units of mole region-1 s-1

            savedict=ncf.StandardVar(varname='ocn_flux_%s'%qual_short)
            savedict['values']=data
            savedict['dims']=dimdate+dimregs
            savedict['count']=next
            ncf.AddData(savedict)


#
# Here comes a special provision for the posterior flux covariances: these are calculated relative to the prior flux covariance to
# ensure they are indeed smaller due to the data assimilation. If they would be calculated relative to the mean posterior flux, the 
# uncertainties would shift just because the mean flux had increased or decreased, which is not what we want.
#
# The implementation is done by multiplying the ensemble with the vectorocn only, and not with the statemean values
# which are assumed 1.0 in the prior always.
#

            deviations          = np.array([mem.ParameterValues*vectorocn for mem in members])
            deviations          = deviations-deviations[0,:]

            savedict=ncf.StandardVar(varname='ocn_flux_%s_ensemble'%qual_short)
            savedict['values']=deviations.tolist()
            savedict['dims']=dimdate+dimmembers+dimregs
            savedict['comment']="This is the matrix square root, use (M x M^T)/(nmembers-1) to make covariance"
            savedict['units']="mol region-1 s-1"
            savedict['count']=next
            ncf.AddData(savedict)

            savedict=ncf.StandardVar('unknown')
            savedict['name'] = 'ocn_flux_%s_std'%qual_short
            savedict['long_name'] = 'Ocean flux standard deviation, %s'%qual_short
            savedict['values']=deviations.std(axis=0)
            savedict['dims']=dimdate+dimregs
            savedict['comment']="This is the standard deviation on each parameter"
            savedict['units']="mol region-1 s-1"
            savedict['count']=next
            ncf.AddData(savedict)

        data                = vectorfire

        savedict=ncf.StandardVar(varname='fire_flux_imp')
        savedict['values']=data
        savedict['dims']=dimdate+dimregs
        savedict['count']=next
        ncf.AddData(savedict)

        data                = vectorfossil

        savedict=ncf.StandardVar(varname='fossil_flux_imp')
        savedict['values']=data
        savedict['dims']=dimdate+dimregs
        savedict['count']=next
        ncf.AddData(savedict)

        savedict=ncf.StandardVar(varname='date')
        savedict['values']  = ncfdate
        savedict['dims']    = dimdate
        savedict['count']   = next
        ncf.AddData(savedict)

        sys.stdout.write('.')
        sys.stdout.flush()
#
#   Done, close the new NetCDF file
#
    ncf.close()
#
#   Return the full name of the NetCDF file so it can be processed by the next routine
#
    msg="Vector weekly average fluxes now written" ; logging.info(msg)

    return saveas


def SaveWeeklyAvgTCData(DaCycle, StateVector):
    """
        Function creates a NetCDF file with output on TransCom regions. It uses the flux input written by the 
        function `SaveWeeklyAvgStateData` (statefluxes.nc), with fluxes of length `nparameters`, which are then projected
        onto TC regions using the internal methods from :class:`~da.baseclasses.statevector.StateVector`.
        
           :param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
           :param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
           :rtype: None

        This function reads the fluxes from the statefluxes.nc file created before, because we want to convolve 
        these with the parameters in the StateVector. This creates posterior fluxes, and the posterior covariance for the complete
        StateVector in units of mol/box/s, which we then turn into TC fluxes and covariances.
    """
    from da.analysis.tools_regions import globarea
    from da.analysis.tools_transcom import transcommask
    import da.tools.io4 as io
    import logging
#
    dirname     = 'data_tc_weekly'
#
    dirname     = CreateDirs(os.path.join(DaCycle['dir.analysis'],dirname))
#
# Some help variables
#
    dectime0    = date2num(datetime(2000,1,1))
    dt          = DaCycle['cyclelength']
    startdate   = DaCycle['time.start'] 
    enddate     = DaCycle['time.end'] 
    ncfdate     = date2num(startdate)-dectime0+dt.days/2.0

    msg = "DA Cycle start date is %s"   % startdate.strftime('%Y-%m-%d %H:%M')      ; logging.debug(msg)
    msg = "DA Cycle end   date is %s"   % enddate.strftime('%Y-%m-%d %H:%M')        ; logging.debug(msg)

    # Write/Create NetCDF output file
    #
    saveas          = os.path.join(dirname,'tcfluxes.nc')
    ncf             = io.CT_CDF(saveas,'write')
    dimdate         = ncf.AddDateDim()
    dimidateformat  = ncf.AddDateDimFormat()
    dimregs         = ncf.AddRegionDim(type='tc')
#
# set title and tell GMT that we are using "pixel registration"
#
    setattr(ncf,'Title','CarbonTracker TransCom fluxes')
    setattr(ncf,'node_offset',1)
    #

    skip             = ncf.has_date(ncfdate)
    if skip:
        msg = 'Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'),saveas) ; logging.warning(msg)
    else:

        # Get input data

        area=globarea()

        infile=os.path.join(DaCycle['dir.analysis'],'data_state_weekly','statefluxes.nc')
        if not os.path.exists(infile):
            msg="Needed input file (%s) does not exist yet, please create file first, returning..."%infile ; logging.error(msg)
            return None

        ncf_in      = io.CT_Read(infile,'read')

        # Transform data one by one

        # Get the date variable, and find index corresponding to the DaCycle date

        try:
            dates       = ncf_in.variables['date'][:]
        except KeyError:
            msg         = "The variable date cannot be found in the requested input file (%s) " % infile ; logging.error(msg)
            msg         = "Please make sure you create gridded fluxes before making TC fluxes " ; logging.error(msg)
            raise KeyError

        try:
            index       = dates.tolist().index(ncfdate)
        except ValueError:
            msg         = "The requested cycle date is not yet available in file %s "% infile ; logging.error(msg)
            msg         = "Please make sure you create state based fluxes before making TC fluxes " ; logging.error(msg)
            raise ValueError

        # First add the date for this cycle to the file, this grows the unlimited dimension

        savedict            = ncf.StandardVar(varname='date')
        savedict['values']  = ncfdate
        savedict['dims']    = dimdate
        savedict['count']   = index
        dummy               = ncf.AddData(savedict)

        # Now convert other variables that were inside the flux_1x1 file

        vardict     = ncf_in.variables
        for vname, vprop in vardict.iteritems():

            data    = ncf_in.GetVariable(vname)[index]

            if vname=='latitude': continue
            elif vname=='longitude': continue
            elif vname=='date': continue
            elif vname=='idate': continue
            elif 'std' in vname: continue
            elif 'ensemble' in vname:
              
                tcdata=[]
                for member in data:
                    tcdata.append(    StateVector.VectorToTC(vectordata=member) )

                tcdata              = np.array(tcdata)
                try:
                    cov                 = tcdata.transpose().dot(tcdata)/(StateVector.nmembers-1) 
                except AttributeError: # older numpy without ndarray.dot (Huygens fix)
                    cov                 = np.dot(tcdata.transpose(),tcdata)/(StateVector.nmembers-1)

                #print vname,cov.sum()

                tcdata              = cov



                savedict            = ncf.StandardVar(varname=vname.replace('ensemble','cov') )
                savedict['units']   = '[mol/region/s]**2'
                savedict['dims']    = dimdate+dimregs+dimregs
                
            else:

                tcdata              = StateVector.VectorToTC(vectordata=data) # vector to TC

                savedict            = ncf.StandardVar(varname=vname)
                savedict['dims']    = dimdate+dimregs
                savedict['units']   = 'mol/region/s'

            savedict['count']   = index
            savedict['values']  = tcdata
            ncf.AddData(savedict)

        ncf_in.close()
    ncf.close()

    msg="TransCom weekly average fluxes now written" ; logging.info(msg)

    return saveas

def SaveWeeklyAvgExtTCData(DaCycle):
    """ Function SaveTCDataExt saves surface flux data to NetCDF files for extended TransCom regions
        
        *** Inputs ***
        rundat : a RunInfo object

        *** Outputs ***
        NetCDF file containing n-hourly global surface fluxes per TransCom region

        *** Example ***
        ./expand_savestate project=enkf_release sd=20000101 ed=20010101 """

    from da.analysis.tools_transcom import ExtendedTCRegions
    import da.tools.io4 as io
    import logging
#
    dirname     = 'data_tc_weekly'
#
    dirname     = CreateDirs(os.path.join(DaCycle['dir.analysis'],dirname))
#
# Some help variables
#
    dectime0    = date2num(datetime(2000,1,1))
    dt          = DaCycle['cyclelength']
    startdate   = DaCycle['time.start'] 
    enddate     = DaCycle['time.end'] 
    ncfdate     = date2num(startdate)-dectime0+dt.days/2.0

    msg = "DA Cycle start date is %s"   % startdate.strftime('%Y-%m-%d %H:%M')      ; logging.debug(msg)
    msg = "DA Cycle end   date is %s"   % enddate.strftime('%Y-%m-%d %H:%M')        ; logging.debug(msg)

    # Write/Create NetCDF output file
    #
    saveas          = os.path.join(dirname,'tc_extfluxes.nc')
    ncf             = io.CT_CDF(saveas,'write')
    dimdate         = ncf.AddDateDim()
    dimidateformat  = ncf.AddDateDimFormat()
    dimregs         = ncf.AddRegionDim(type='tc_ext')
#
# set title and tell GMT that we are using "pixel registration"
#
    setattr(ncf,'Title','CarbonTracker TransCom fluxes')
    setattr(ncf,'node_offset',1)
    #

    skip             = ncf.has_date(ncfdate)
    if skip:
        msg = 'Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'),saveas) ; logging.warning(msg)
    else:
        infile=os.path.join(DaCycle['dir.analysis'],'data_tc_weekly','tcfluxes.nc')
        if not os.path.exists(infile):
            msg="Needed input file (%s) does not exist yet, please create file first, returning..."%infile ; logging.error(msg)
            return None

        ncf_in      = io.CT_Read(infile,'read')

        # Transform data one by one

        # Get the date variable, and find index corresponding to the DaCycle date

        try:
            dates       = ncf_in.variables['date'][:]
        except KeyError:
            msg         = "The variable date cannot be found in the requested input file (%s) " % infile ; logging.error(msg)
            msg         = "Please make sure you create gridded fluxes before making extended TC fluxes " ; logging.error(msg)
            raise KeyError

        try:
            index       = dates.tolist().index(ncfdate)
        except ValueError:
            msg         = "The requested cycle date is not yet available in file %s "% infile ; logging.error(msg)
            msg         = "Please make sure you create state based fluxes before making extended TC fluxes " ; logging.error(msg)
            raise ValueError

        # First add the date for this cycle to the file, this grows the unlimited dimension

        savedict            = ncf.StandardVar(varname='date')
        savedict['values']  = ncfdate
        savedict['dims']    = dimdate
        savedict['count']   = index
        dummy               = ncf.AddData(savedict)

        # Now convert other variables that were inside the tcfluxes.nc file

        vardict     = ncf_in.variables
        for vname, vprop in vardict.iteritems():

            data    = ncf_in.GetVariable(vname)[index]

            if vname=='latitude': continue
            elif vname=='longitude': continue
            elif vname=='date': continue
            elif vname=='idate': continue
            elif 'cov' in vname:
              
                tcdata              = ExtendedTCRegions(data,cov=True)

                savedict            = ncf.StandardVar(varname=vname)
                savedict['units']   = '[mol/region/s]**2'
                savedict['dims']    = dimdate+dimregs+dimregs
                
            else:

                tcdata              = ExtendedTCRegions(data,cov=False)

                savedict            = ncf.StandardVar(varname=vname)
                savedict['dims']    = dimdate+dimregs
                savedict['units']   = 'mol/region/s'

            savedict['count']   = index
            savedict['values']  = tcdata
            ncf.AddData(savedict)

        ncf_in.close()
    ncf.close()

    msg="TransCom weekly average extended fluxes now written" ; logging.info(msg)

    return saveas

def SaveWeeklyAvgAggData(DaCycle, region_aggregate='olson'):
    """
        Function creates a NetCDF file with output on TransCom regions. It uses the flux input from the 
        function `SaveWeeklyAvg1x1Data` to create fluxes of length `nparameters`, which are then projected
        onto TC regions using the internal methods from :class:`~da.baseclasses.statevector.StateVector`.
        
           :param DaCycle: a :class:`~da.tools.initexit.CycleControl` object
           :param StateVector: a :class:`~da.baseclasses.statevector.StateVector`
           :rtype: None

        This function only read the prior fluxes from the flux_1x1.nc files created before, because we want to convolve 
        these with the parameters in the StateVector. This creates posterior fluxes, and the posterior covariance for the complete
        StateVector in units of mol/box/s which we then turn into TC fluxes and covariances.
    """
    from da.analysis.tools_regions import globarea, StateToGrid
    import da.analysis.tools_transcom as tc 
    import da.analysis.tools_country as ct
    import da.tools.io4 as io
    import logging

#
    dirname     = 'data_%s_weekly'%region_aggregate
#
    dirname     = CreateDirs(os.path.join(DaCycle['dir.analysis'],dirname))
#
# Some help variables
#
    dectime0    = date2num(datetime(2000,1,1))
    dt          = DaCycle['cyclelength']
    startdate   = DaCycle['time.start'] 
    enddate     = DaCycle['time.end'] 
    ncfdate     = date2num(startdate)-dectime0+dt.days/2.0

    msg = "DA Cycle start date is %s"   % startdate.strftime('%Y-%m-%d %H:%M')      ; logging.debug(msg)
    msg = "DA Cycle end   date is %s"   % enddate.strftime('%Y-%m-%d %H:%M')        ; logging.debug(msg)

    msg = "Aggregating 1x1 fluxes to %s totals"   % region_aggregate      ; logging.debug(msg)


    # Write/Create NetCDF output file
    #
    saveas          = os.path.join(dirname,'%s_fluxes.%s.nc'%(region_aggregate, startdate.strftime('%Y-%m-%d'),))
    ncf             = io.CT_CDF(saveas,'write')
    dimdate         = ncf.AddDateDim()
    dimidateformat  = ncf.AddDateDimFormat()
    dimgrid         = ncf.AddLatLonDim()  # for mask
#
#   Select regions to aggregate to
# 

    if region_aggregate == "olson":
        regionmask = tc.olson240mask
        dimname    = 'olson'
        dimregs    = ncf.AddDim(dimname,regionmask.max())

        regionnames = []
        for i in range(11):
            for j in range(19):
                regionnames.append("%s_%s"%(tc.transnams[i],tc.olsonnams[j],) )
        regionnames.extend(tc.oifnams)

        for i,name in enumerate(regionnames):
                lab='Aggregate_Region_%03d'%(i+1,)
                setattr(ncf,lab,name)

    elif region_aggregate == "transcom":
        regionmask = tc.transcommask
        dimname    = 'tc'
        dimregs    = ncf.AddRegionDim(type='tc')

    elif region_aggregate == "country":

        countrydict = ct.get_countrydict()

        selected =  ['Russia','Canada','China','United States','EU27','Brazil','Australia','India'] #,'G8','UNFCCC_annex1','UNFCCC_annex2']

        regionmask = np.zeros((180,360,),'float')

        for i,name in enumerate(selected):

            lab='Country_%03d'%(i+1,)
            setattr(ncf,lab,name)

            if name == 'EU27':
                namelist=ct.EU27
            elif name == 'EU25':
                namelist=ct.EU25
            elif name == 'G8':
                namelist=ct.G8
            elif name == 'UNFCCC_annex1':
                namelist=ct.annex1
            elif name == 'UNFCCC_annex2':
                namelist=ct.annex2
            else:
                namelist=[name]

            for countryname in namelist:
                try:
                    country=countrydict[countryname]
                    regionmask.put(country.gridnr,i+1)
                except:
                    continue

        dimname    = 'country'
        dimregs    = ncf.AddDim(dimname,int(regionmask.max()))

    #

    skip             = ncf.has_date(ncfdate)
    if skip:
        msg = 'Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'),saveas) ; logging.warning(msg)
    else:
        #
        # set title and tell GMT that we are using "pixel registration"
        #
        setattr(ncf,'Title','CTDAS Aggregated fluxes')
        setattr(ncf,'node_offset',1)

        savedict            = ncf.StandardVar('unknown')
        savedict['name']    = 'regionmask'
        savedict['comment'] = 'numerical mask used to aggregate 1x1 flux fields, each integer 0,...,N is one region aggregated'
        savedict['values']  = regionmask.tolist()
        savedict['units']   = '-'
        savedict['dims']    = dimgrid
        savedict['count']   = 0
        dummy               = ncf.AddData(savedict)

        # Get input data from 1x1 degree flux files

        area=globarea()

        infile=os.path.join(DaCycle['dir.analysis'],'data_flux1x1_weekly','flux_1x1.%s.nc'%startdate.strftime('%Y-%m-%d') )
        if not os.path.exists(infile):
            msg="Needed input file (%s) does not exist yet, please create file first, returning..."%infile ; logging.error(msg)
            return None

        ncf_in      = io.CT_Read(infile,'read')

        # Transform data one by one

        # Get the date variable, and find index corresponding to the DaCycle date

        try:
            dates       = ncf_in.variables['date'][:]
        except KeyError:
            msg         = "The variable date cannot be found in the requested input file (%s) " % infile ; logging.error(msg)
            msg         = "Please make sure you create gridded fluxes before making TC fluxes " ; logging.error(msg)
            raise KeyError

        try:
            index       = dates.tolist().index(ncfdate)
        except ValueError:
            msg         = "The requested cycle date is not yet available in file %s "% infile ; logging.error(msg)
            msg         = "Please make sure you create state based fluxes before making TC fluxes " ; logging.error(msg)
            raise ValueError

        # First add the date for this cycle to the file, this grows the unlimited dimension

        savedict            = ncf.StandardVar(varname='date')
        savedict['values']  = ncfdate
        savedict['dims']    = dimdate
        savedict['count']   = index
        dummy               = ncf.AddData(savedict)

        # Now convert other variables that were inside the flux_1x1 file

        vardict     = ncf_in.variables
        for vname, vprop in vardict.iteritems():


            if vname=='latitude': continue
            elif vname=='longitude': continue
            elif vname=='date': continue
            elif vname=='idate': continue
            elif 'std' in vname: continue
            elif 'ensemble' in vname:

                data    = ncf_in.GetVariable(vname)[index]
             
                dimensemble     = ncf.AddDim('members',data.shape[0])

                regiondata=[]
                for member in data:
                    aggdata   = StateToGrid(member*area,regionmask,reverse=True,mapname=region_aggregate)
                    regiondata.append( aggdata )


                regiondata              = np.array(regiondata)

                savedict            = ncf.StandardVar(varname=vname)
                savedict['units']   = 'mol/region/s'
                savedict['dims']    = dimdate+dimensemble+dimregs
                
            elif 'flux' in vname:

                data    = ncf_in.GetVariable(vname)[index]

                regiondata  = StateToGrid(data*area,regionmask,reverse=True,mapname=region_aggregate)

                savedict            = ncf.StandardVar(varname=vname)
                savedict['dims']    = dimdate+dimregs
                savedict['units']   = 'mol/region/s'

            else:

                data    = ncf_in.GetVariable(vname)[:]
                regiondata  = StateToGrid(data,regionmask,reverse=True,mapname=region_aggregate)

                savedict            = ncf.StandardVar(varname=vname)
                savedict['dims']    = dimdate+dimregs

            savedict['count']   = index
            savedict['values']  = regiondata
            ncf.AddData(savedict)

        ncf_in.close()
    ncf.close()

    msg="%s aggregated weekly average fluxes now written"%dimname ; logging.info(msg)

    return saveas

def SaveTimeAvgData(DaCycle,infile,avg='monthly'):
    """ Function saves time mean surface flux data to NetCDF files
        
        *** Inputs ***
        rundat : a RunInfo object

        *** Outputs ***
        daily NetCDF file containing 1-hourly global surface fluxes at 1x1 degree

        *** Example ***
        ./expand_savestate project=enkf_release sd=20000101 ed=20010101 """

    import da.analysis.tools_time as timetools
    import da.tools.io4 as io
    import logging

    if 'weekly' in infile: intime = 'weekly'
    if 'monthly' in infile: intime = 'monthly'
    if 'yearly' in infile: intime = 'yearly'

    dirname,filename    = os.path.split(infile)
    outdir              = CreateDirs(os.path.join(DaCycle['dir.analysis'],dirname.replace(intime,avg)))

    dectime0            = date2num(datetime(2000,1,1))

# Create NetCDF output file
#
    saveas              = os.path.join(outdir,filename)
    ncf                 = io.CT_CDF(saveas,'create')
    dimdate             = ncf.AddDateDim()
#
# Open input file specified from the command line
#
    if not os.path.exists(infile):
        logging.error( "Needed input file (%s) not found. Please create this first:"%infile )
        logging.error( "returning..." )
        return None 

    file        = io.CT_Read(infile,'read')
    datasets    = file.variables.keys()
    date        = file.GetVariable('date')
    globatts    = file.ncattrs()

    for att in globatts:
        attval = file.getncattr(att)
        if not att in ncf.ncattrs():
            ncf.setncattr(att,attval)


    time        = [datetime(2000,1,1)+timedelta(days=d) for d in date]

# loop over datasets in infile; 'date' is handled first so that the new time axis exists for the averaged data

    for sds in ['date']+datasets:

# get original data

        data        = file.GetVariable(sds)
        varatts     = file.variables[sds].ncattrs()
        vardims     = file.variables[sds].dimensions
#
# Depending on dims of input dataset, create dims for output dataset. Note that we add the new dimdate now.
#

        for d in vardims:
            if 'date' in d: 
                continue
            if d in ncf.dimensions.keys(): 
                pass
            else:
                dim = ncf.createDimension(d,size=len(file.dimensions[d]))

        savedict                    = ncf.StandardVar(sds)
        savedict['name']            = sds
        savedict['dims']            = vardims
        savedict['units']           = file.variables[sds].units
        savedict['long_name']       = file.variables[sds].long_name
        savedict['comment']         = file.variables[sds].comment
        savedict['standard_name']   = file.variables[sds].standard_name
        savedict['count']           = 0

        if not 'date' in vardims: 
            savedict['values']          = data
            ncf.AddData(savedict)
        else:

            if avg == 'monthly':
                time_avg, data_avg      = timetools.monthly_avg(time,data)
            elif avg == 'seasonal':
                time_avg, data_avg      = timetools.season_avg(time,data)
            elif avg == 'yearly':
                time_avg, data_avg      = timetools.yearly_avg(time,data)
            elif avg == 'longterm':
                time_avg, data_avg      = timetools.longterm_avg(time,data)
                time_avg                = [time_avg]
                data_avg                = [data_avg]
            else:
                raise ValueError,'Averaging (%s) does not exist' % avg

            count=-1
            for dd,data in zip(time_avg,data_avg):
                count=count+1
                if sds == 'date':
                    savedict['values'] = date2num(dd)-dectime0
                else:
                    savedict['values']=data
                savedict['count']=count
                ncf.AddData(savedict,silent=True)

                sys.stdout.write('.')

            sys.stdout.write('\n')
            sys.stdout.flush()

# end NetCDF file access
    file.close()
    ncf.close()

    msg = "------------------- Finished time averaging---------------------------------"       ; logging.info(msg)


    return saveas

if __name__ == "__main__":

    import logging
    from da.tools.initexit import CycleControl
    from da.ct.dasystem import CtDaSystem 
    from da.ct.statevector import CtStateVector 

    sys.path.append('../../')

    logging.root.setLevel(logging.DEBUG)

    DaCycle = CycleControl(args={'rc':'../../ctdas-od-gfed2-glb6x4-obspack-full.rc'})
    DaCycle.Initialize()
    DaCycle.ParseTimes()

    DaSystem    = CtDaSystem('../rc/carbontracker_ct09_opf.rc')
    DaSystem.Initialize()

    DaCycle.DaSystem    = DaSystem

    StateVector = CtStateVector(DaCycle)
    dummy       = StateVector.Initialize()

    while DaCycle['time.end'] < DaCycle['time.finish']:

        savedas_1x1=SaveWeeklyAvg1x1Data(DaCycle, StateVector)
        #savedas_state=SaveWeeklyAvgStateData(DaCycle, StateVector)
        #savedas_tc=SaveWeeklyAvgTCData(DaCycle, StateVector)
        #savedas_tcext=SaveWeeklyAvgExtTCData(DaCycle)
        savedas_olson=SaveWeeklyAvgAggData(DaCycle,region_aggregate='olson')
        savedas_transcom=SaveWeeklyAvgAggData(DaCycle,region_aggregate='transcom')
        savedas_country=SaveWeeklyAvgAggData(DaCycle,region_aggregate='country')

        DaCycle.AdvanceCycleTimes()

    StateVector = None # free memory

    for avg in ['monthly','yearly','longterm']:

        savedas_1x1     = SaveTimeAvgData(DaCycle,savedas_1x1,avg)
        # The state/tc/tcext weekly routines are commented out above, so skip their time averages too
        #savedas_state   = SaveTimeAvgData(DaCycle,savedas_state,avg)
        #savedas_tc      = SaveTimeAvgData(DaCycle,savedas_tc,avg)
        #savedas_tcext   = SaveTimeAvgData(DaCycle,savedas_tcext,avg)
        savedas_olson   = SaveTimeAvgData(DaCycle,savedas_olson,avg)
        savedas_transcom   = SaveTimeAvgData(DaCycle,savedas_transcom,avg)
        savedas_country    = SaveTimeAvgData(DaCycle,savedas_country,avg)

    sys.exit(0)