Commit 11809ea5 authored by Peters, Wouter

TransCom fluxes now completed in analysis step

parent b87080b7
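In effect, the analysis step now chains the TransCom aggregation directly after the gridded flux export each cycle. The sketch below is a minimal, hypothetical driver assembled from the pipeline and __main__ changes in this diff; the rc path and the CycleControl/AdvanceCycleTimes usage are taken from the __main__ block further down and assume the project root is on sys.path, so this is an illustration of the new flow rather than a guaranteed API.

import logging

from da.tools.initexit import CycleControl
from da.analysis.expand_fluxes import SaveWeeklyAvg1x1Data, SaveWeeklyAvgTCData

logging.root.setLevel(logging.INFO)

DaCycle = CycleControl(args={'rc': '../../da.rc'})  # rc path as used in __main__ below
DaCycle.Initialize()

while DaCycle['time.start'] < DaCycle['time.finish']:
    savedas = SaveWeeklyAvg1x1Data(DaCycle)   # writes data_flux1x1_weekly/flux_1x1.nc
    savedas = SaveWeeklyAvgTCData(DaCycle)    # reads it back, writes data_tc_weekly/tcfluxes.nc
    DaCycle.AdvanceCycleTimes()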
@@ -5,7 +5,6 @@ sys.path.append('../../')
 import os
 import getopt
 from pylab import array, arange, transpose, date2num
-from runinfo import *
 from datetime import datetime
 from da.tools.general import CreateDirs
 from da.tools.io4 import CT_Read, CT_CDF, std_savedict, GetVariable
@@ -154,248 +153,127 @@ def SaveWeeklyAvg1x1Data(DaCycle):
     #
     # Return the full name of the NetCDF file so it can be processed by the next routine
     #
     msg="Gridded weekly average fluxes now written" ; logging.info(msg)
     return saveas

-def SaveWeeklyAvgEcoData(rundat):
+def SaveWeeklyAvgTCData(DaCycle):
     """ Function SaveEcoData saves weekly surface flux data to NetCDF files

         *** Inputs ***
-        rundat : a RunInfo object
+        DaCycle : a DaCycle object
     """
-    from da.analysis.tools_regions import globarea
+    from da.analysis.tools_regions import globarea, StateToGrid
+    from da.analysis.tools_transcom import StateToTranscom, StateCovToTranscom, transcommask
+    import da.tools.io4 as io
     from numpy import dot
+    import logging
+    #
+    dirname = 'data_tc_weekly'
+    #
+    dirname = CreateDirs(os.path.join(DaCycle['dir.analysis'],dirname))
+    #
+    # Some help variables
+    #
+    dectime0 = date2num(datetime(2000,1,1))
+    dt = DaCycle['cyclelength']
+    startdate = DaCycle['time.start']
+    enddate = DaCycle['time.end']
+    ncfdate = date2num(startdate)-dectime0+dt.days/2.0
-    dirname='data_eco_weekly'
-    dirname = CreateDirs(os.path.join(rundat.outputdir,dirname))
+    msg = "DA Cycle start date is %s" % startdate.strftime('%Y-%m-%d %H:%M') ; logging.info(msg)
+    msg = "DA Cycle end date is %s" % enddate.strftime('%Y-%m-%d %H:%M') ; logging.info(msg)
-    # Create NetCDF output file
+    # Write/Create NetCDF output file
     #
-    saveas = os.path.join(dirname,'ecofluxes.nc')
-    ncf = CT_CDF(saveas,'write')
-    dimdate = ncf.AddDateDim()
+    saveas = os.path.join(dirname,'tcfluxes.nc')
+    ncf = io.CT_CDF(saveas,'write')
+    dimdate = ncf.AddDateDim()
+    dimidateformat = ncf.AddDateDimFormat()
-    dimregs = ncf.AddRegionDim(type='eco')
+    dimregs = ncf.AddRegionDim(type='tc')
+    #
+    # set title and tell GMT that we are using "pixel registration"
+    #
+    setattr(ncf,'Title','CarbonTracker TransCom fluxes')
+    setattr(ncf,'node_offset',1)
     #
-    dectime0=date2num(datetime(2000,1,1))
+    skip = ncf.has_date(ncfdate)
+    if skip:
+        msg = 'Skipping writing of data for date %s : already present in file %s' % (startdate.strftime('%Y-%m-%d'),saveas) ; logging.warning(msg)
+    else:
-    area=globarea()
-    dt=rundat.dt
-    print '['
-    for week,savefile in zip(rundat.weeks,rundat.inputfiles):
-        # Get input data
-        skip=ncf.has_date(date2num(week+dt/2)-dectime0)
-        if skip:
-            sys.stdout.write('#')
-            sys.stdout.flush()
-            continue
-        try:
-            hdf=HDF.SD(savefile)
-        except:
-            print 'An error occurred opening the file: %s'%savefile
-            print 'Exiting...'
-            sys.exit(2)
-        bio=GetVariable(hdf,rundat.namedict['background.co2.bio.flux'])
-        oce=GetVariable(hdf,rundat.namedict['background.co2.ocean.flux'])
-        ff=GetVariable(hdf,rundat.namedict['background.co2.fossil.flux'])
-        fire=GetVariable(hdf,rundat.namedict['background.co2.fires.flux'])
-        priorensemble=GetVariable(hdf,rundat.namedict['background.param.ensemble']+'_05')
-        postparams=GetVariable(hdf,rundat.namedict['final.param.mean']+'_01')
-        postensemble=GetVariable(hdf,rundat.namedict['final.param.ensemble']+'_01')
-        hdf.end()
-        next=ncf.inq_unlimlen()[0]
-        print next
-        data=map_to_regions(rundat,bio*area) # units of mole region-1 s-1
-        savedict=ncf.StandardVar(varname='bio_flux_prior')
-        savedict['values']=data.tolist()
-        savedict['dims']=dimdate+dimregs
-        savedict['units']='mol region-1 s-1'
-        savedict['count']=next
-        ncf.AddData(savedict)
-        data=postparams*map_to_regions(rundat,bio*area) # units of mole region-1 s-1
-        savedict=ncf.StandardVar(varname='bio_flux_opt')
-        savedict['values']=data.tolist()
-        savedict['dims']=dimdate+dimregs
-        savedict['units']='mol region-1 s-1'
-        savedict['count']=next
-        ncf.AddData(savedict)
-        data=map_to_regions(rundat,bio*area) # units of mole region-1 s-1
-        priorfluxens=data*priorensemble[1:,:]
-        covdata=dot(transpose(priorfluxens),priorfluxens)/(rundat.nmembers-1)
-        savedict=ncf.StandardVar(varname='bio_flux_prior_cov')
-        savedict['values']=covdata.tolist()
-        savedict['dims']=dimdate+dimregs+dimregs
-        savedict['count']=next
-        ncf.AddData(savedict)
-        data=map_to_regions(rundat,bio*area) # units of mole region-1 s-1
-        postfluxens=data*postensemble[1:,:]
-        covdata=dot(transpose(postfluxens),postfluxens)/(rundat.nmembers-1)
-        savedict=ncf.StandardVar(varname='bio_flux_opt_cov')
-        savedict['values']=covdata.tolist()
-        savedict['dims']=dimdate+dimregs+dimregs
-        savedict['count']=next
-        ncf.AddData(savedict)
-        data=map_to_regions(rundat,oce*area) # units of mole region-1 s-1
-        savedict=ncf.StandardVar(varname='ocn_flux_prior')
-        savedict['values']=data.tolist()
-        savedict['dims']=dimdate+dimregs
-        savedict['units']='mol region-1 s-1'
-        savedict['count']=next
-        ncf.AddData(savedict)
-        data=postparams*map_to_regions(rundat,oce*area) # units of mole region-1 s-1
-        savedict=ncf.StandardVar(varname='ocn_flux_opt')
-        savedict['values']=data.tolist()
-        savedict['dims']=dimdate+dimregs
-        savedict['units']='mol region-1 s-1'
-        savedict['count']=next
-        ncf.AddData(savedict)
-        data=map_to_regions(rundat,oce*area) # units of mole region-1 s-1
-        priorfluxens=data*priorensemble[1:,:]
-        covdata=dot(transpose(priorfluxens),priorfluxens)/(rundat.nmembers-1)
-        savedict=ncf.StandardVar(varname='ocn_flux_prior_cov')
-        savedict['values']=covdata.tolist()
-        savedict['dims']=dimdate+dimregs+dimregs
-        savedict['units']='mol2 region-2 s-2'
-        savedict['count']=next
-        ncf.AddData(savedict)
-        data=map_to_regions(rundat,oce*area) # units of mole region-1 s-1
-        postfluxens=data*postensemble[1:,:]
-        covdata=dot(transpose(postfluxens),postfluxens)/(rundat.nmembers-1)
-        savedict=ncf.StandardVar(varname='ocn_flux_opt_cov')
-        savedict['values']=covdata.tolist()
-        savedict['dims']=dimdate+dimregs+dimregs
-        savedict['units']='mol2 region-2 s-2'
-        savedict['count']=next
-        ncf.AddData(savedict)
-        data=map_to_regions(rundat,fire*area) # units of mole region-1 s-1
-        savedict=ncf.StandardVar(varname='fire_flux_imp')
-        savedict['values']=data.tolist()
-        savedict['dims']=dimdate+dimregs
-        savedict['units']='mol region-1 s-1'
-        savedict['count']=next
-        ncf.AddData(savedict)
-        data=map_to_regions(rundat,ff*area) # units of mole region-1 s-1
-        savedict=ncf.StandardVar(varname='fossil_flux_imp')
-        savedict['values']=data.tolist()
-        savedict['dims']=dimdate+dimregs
-        savedict['units']='mol region-1 s-1'
-        savedict['count']=next
-        ncf.AddData(savedict)
-        date=(week+rundat.dt/2)
-        savedict=ncf.StandardVar(varname='date')
-        savedict['values']=date2num(date)-dectime0
-        savedict['dims']=dimdate
-        savedict['count']=next
-        ncf.AddData(savedict)
-        sys.stdout.write('.')
-        sys.stdout.flush()
-    ncf.close()
-    print ']'
-    return saveas
+        area=globarea()
-def SaveWeeklyAvgTCData(rundat):
-    """ Function SaveEcoData saves weekly surface flux data to NetCDF files
-        *** Inputs ***
-        rundat : a RunInfo object
-    """
-    from da.analysis.tools_regions import globarea, StateToGrid
-    from da.analysis.tools_transcom import StateToTranscom, StateCovToTranscom, transcommask
-    import da.tools.io4 as io
-    from numpy import dot
+        infile=os.path.join(DaCycle['dir.analysis'],'data_flux1x1_weekly','flux_1x1.nc')
+        if not os.path.exists(infile):
+            msg="Needed input file (%s) does not exist yet, please create weekly eco flux files first, returning..."%infile ; logging.error(msg)
+            return None
-    dirname='data_tc_weekly'
+        ncf_in = io.CT_Read(infile,'read')
-    dirname = CreateDirs(os.path.join(rundat.outputdir,dirname))
+        # Transform data one by one
-    # Create NetCDF output file
-    #
-    saveas = os.path.join(dirname,'tcfluxes.nc')
-    ncf = io.CT_CDF(saveas,'create')
-    dimdate = ncf.AddDateDim()
-    dimidateformat = ncf.AddDateDimFormat()
-    dimregs = ncf.AddRegionDim(type='tc')
-    #
-    dectime0=date2num(datetime(2000,1,1))
+        # Get the date variable, and find index corresponding to the DaCycle date
+        try:
+            dates = ncf_in.variables['date'][:]
+        except KeyError:
+            msg = "The variable date cannot be found in the requested input file (%s) " % infile ; logging.error(msg)
+            msg = "Please make sure you create gridded fluxes before making TC fluxes " ; logging.error(msg)
+            raise KeyError
-    area=globarea()
-    dt=rundat.dt
+        try:
+            index = dates.tolist().index(ncfdate)
+        except ValueError:
+            msg = "The requested cycle date is not yet available as gridded flux in file %s "% infile ; logging.error(msg)
+            msg = "Please make sure you create gridded fluxes before making TC fluxes " ; logging.error(msg)
+            raise ValueError
-    infile=os.path.join(rundat.outputdir,'data_flux1x1_weekly','flux_1x1.nc')
-    if not os.path.exists(infile):
-        print "Needed input file (%s) does not exist yet, please create weekly eco flux files first, returning..."%infile
-        return None
+        # First add the date for this cycle to the file, this grows the unlimited dimension
-    ncf_in = io.CT_Read(infile)
-    vardict = ncf_in.variables
-    for vname, vprop in vardict.iteritems():
+        savedict = ncf.StandardVar(varname='date')
+        savedict['values'] = ncfdate
+        savedict['dims'] = dimdate
+        savedict['count'] = index
+        dummy = ncf.AddData(savedict)
-        data = ncf_in.GetVariable(vname)[:]
+        # Now convert other variables that were inside the flux_1x1 file
-        if vname=='latitude': continue
-        elif vname=='longitude': continue
-        elif vname=='date': dims=dimdate
-        elif vname=='idate': dims=dimdate+dimidateformat
+        vardict = ncf_in.variables
+        for vname, vprop in vardict.iteritems():
-        if vname not in ['date','idate']:
+            data = ncf_in.GetVariable(vname)[index]
+            if vname=='latitude': continue
+            elif vname=='longitude': continue
+            if vname not in ['date','idate']:
                 if 'cov' in vname:
-                alldata=[]
-                for dd in data:
-                    dd=StateCovToGrid(dd*area,transcommask,reverse=True)
-                    alldata.append(dd)
-                data=array(alldata)
+                    dd=StateCovToGrid(data*area,transcommask,reverse=True)
+                    data=array(dd)
                     dims=dimdate+dimregs+dimregs
                 else:
-                alldata=[]
-                for dd in data:
-                    dd=StateToGrid(dd*area,transcommask,reverse=True)
-                    alldata.append(dd)
-                data=array(alldata)
+                    dd=StateToGrid(data*area,transcommask,reverse=True)
+                    data=array(dd)
                     dims=dimdate+dimregs
-        print vname,data.shape
-        savedict = ncf.StandardVar(varname=vname)
-        savedict['values'] = data.tolist()
-        savedict['dims'] = dims
-        savedict['units'] = 'mol/region/s'
-        savedict['count'] = 0
-        ncf.AddData(savedict,nsets=data.shape[0])
+                savedict = ncf.StandardVar(varname=vname)
+                savedict['values'] = data.tolist()
+                savedict['dims'] = dims
+                savedict['units'] = 'mol/region/s'
+                savedict['count'] = index
+                ncf.AddData(savedict)
         ncf_in.close()
     ncf.close()
+    msg="TransCom weekly average fluxes now written" ; logging.info(msg)
     return saveas

 def SaveTCDataExt(rundat):
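Both the removed per-ecoregion writer and the TransCom writer above estimate flux covariances the same way: a sample covariance over ensemble flux deviations, as in the dot(transpose(priorfluxens),priorfluxens)/(rundat.nmembers-1) lines that produce bio_flux_prior_cov and friends. A standalone numpy sketch of that estimator, with ensemble size and region count made up for illustration:

import numpy as np

nmembers, nregions = 150, 11                      # hypothetical sizes, not from the code
deviations = np.random.randn(nmembers, nregions)  # stand-in for flux * priorensemble[1:,:]

# Same estimator as dot(transpose(priorfluxens),priorfluxens)/(nmembers-1) above;
# rows are ensemble deviations from the mean, so this is the sample covariance.
covdata = np.dot(deviations.T, deviations) / (nmembers - 1)  # mol2 region-2 s-2
assert covdata.shape == (nregions, nregions)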
@@ -668,12 +546,12 @@ def SaveTimeAvgData(rundat,infile,avg='monthly'):
 if __name__ == "__main__":

     import logging
-    from da.tools.initexit import CycleControl, StartLogger
+    from da.tools.initexit import CycleControl
     from da.ct.dasystem import CtDaSystem

     sys.path.append('../../')

-    StartLogger()
+    logging.root.setLevel(logging.INFO)

     DaCycle = CycleControl(args={'rc':'../../da.rc'})
     DaCycle.Initialize()
@@ -688,41 +566,26 @@ if __name__ == "__main__":
     proceed = no

     print DaCycle
     print DaSystem

+    # first, parse results from the inverse part of the run
     if proceed != all:
         proceed = proceed_dialog('Create average 1x1 flux data files? [y|yes, n|no, a|all|yes-to-all ]')
     if proceed != no:
+        while DaCycle['time.start'] < DaCycle['time.finish']:
             savedas=SaveWeeklyAvg1x1Data(DaCycle)
+            savedas=SaveWeeklyAvgTCData(DaCycle)
+            DaCycle.AdvanceCycleTimes()
+        sys.exit(2)
-        a=SaveTimeAvgData(rundat,savedas,avg='monthly')
-        a=SaveTimeAvgData(rundat,savedas,avg='yearly')
-        a=SaveTimeAvgData(rundat,savedas,avg='longterm')
-    if proceed != all:
-        proceed = proceed_dialog('Create average eco flux data files? [y|yes, n|no, a|all|yes-to-all] ')
-    if proceed != no:
-        savedas=SaveWeeklyAvgEcoData(rundat)
-        a=SaveTimeAvgData(rundat,savedas,avg='monthly')
-        a=SaveTimeAvgData(rundat,savedas,avg='seasonal')
-        a=SaveTimeAvgData(rundat,savedas,avg='yearly')
-        a=SaveTimeAvgData(rundat,savedas,avg='longterm')
-    if proceed != all:
-        proceed = proceed_dialog('Create average extended eco flux data files? [y|yes, n|no, a|all|yes-to-all] ')
-    if proceed != no:
-        savedas=SaveEcoDataExt(rundat)
-        a=SaveTimeAvgData(rundat,savedas,avg='monthly')
-        a=SaveTimeAvgData(rundat,savedas,avg='seasonal')
-        a=SaveTimeAvgData(rundat,savedas,avg='yearly')
-        a=SaveTimeAvgData(rundat,savedas,avg='longterm')
-    if proceed != all:
-        proceed = proceed_dialog('Create average tc flux data files? [y|yes, n|no, a|all|yes-to-all] ')
-    if proceed != no:
-        savedas=SaveWeeklyAvgTCData(rundat)
-        a=SaveTimeAvgData(rundat,savedas,avg='monthly')
-        a=SaveTimeAvgData(rundat,savedas,avg='seasonal')
-        a=SaveTimeAvgData(rundat,savedas,avg='yearly')
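The savedict['count'] = index idiom in SaveWeeklyAvgTCData above writes each cycle's record at the slot matching its date in the gridded flux file, so rerunning a cycle overwrites the existing record instead of appending a duplicate. A rough standalone analogue with the plain netCDF4 library (da.tools.io4 wraps a similar interface; the file name and region count here are invented for the example):

import numpy as np
import netCDF4

ncf = netCDF4.Dataset('tcfluxes_demo.nc', 'w')
ncf.createDimension('date', None)           # unlimited record dimension, grows per cycle
ncf.createDimension('tc', 23)               # hypothetical TransCom region count
var = ncf.createVariable('bio_flux_opt', 'f8', ('date', 'tc'))

index = 5                                   # slot for this cycle's date
var[index, :] = np.zeros(23)                # grows the record dimension to index+1;
                                            # writing the same index again overwrites in place
ncf.close()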
@@ -3,15 +3,16 @@
 import os
 import sys
 sys.path.append('../../')
+rootdir = os.getcwd().split('da/')[0]
+analysisdir = os.path.join(rootdir,'da/analysis')
 from string import join, split
 from numpy import array, identity, zeros, arange
 import da.tools.io4 as io
-import da.analysis.mysettings as mysettings

 # Get masks of different region definitions

-matrix_file = 'regions.nc'
+matrix_file = os.path.join(analysisdir,'regions.nc')
 cdf_temp = io.CT_CDF(matrix_file,'read')
 transcommask = cdf_temp.GetVariable('transcom_regions')
 olson240mask = cdf_temp.GetVariable('regions')
@@ -24,7 +25,7 @@ dummy = cdf_temp.close()
 transshort = []
 transnams = []
 transland = []
-temp = open('t3_region_names','r').readlines()
+temp = open(os.path.join(analysisdir,'t3_region_names'),'r').readlines()
 for line in temp:
     items = line.split()
     if items:
@@ -40,7 +41,7 @@ transshort.append("I-NNOP")
 olsonnams = []
 olsonshort = []
-temp = open('olson19_region_names','r').readlines()
+temp = open(os.path.join(analysisdir,'olson19_region_names'),'r').readlines()
 for line in temp:
     items = line.split()
     if items:
@@ -54,7 +55,7 @@ ext_transcomps = []

 # Get names of aggregated regions for post aggregation

-matrix_file = 'postagg_definitions.nc'
+matrix_file = os.path.join(analysisdir,'postagg_definitions.nc')
 cdf_temp = io.CT_CDF(matrix_file,'read')
 xform = cdf_temp.GetVariable('xform')
 keys = cdf_temp.ncattrs()
@@ -295,11 +296,10 @@ def map_to_tc(data):
     from hdf2field import Sds2field
     import cPickle
     import os
-    import mysettings
     from plottools import rebin

-    transcommapfile= os.path.join(mysettings.pylibdir,'datasets','tc_land11_oif30.hdf')
-    transcomconversionfile= os.path.join(mysettings.pylibdir,'datasets','map_to_tc.pickle')
+    transcommapfile= 'tc_land11_oif30.hdf'
+    transcomconversionfile= 'map_to_tc.pickle'
     try:
         regionselect = cPickle.load(open(transcomconversionfile,'rb'))
     except:
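The path changes in this file all follow one pattern: ancillary data files (regions.nc, the region-name tables, postagg_definitions.nc) are resolved against the analysis directory instead of the process working directory, which removes the mysettings dependency. A minimal sketch of the pattern; the commented-out alternative is a common variation, not part of this commit:

import os

# As committed: derive the analysis dir from the current working directory,
# which assumes the process is started somewhere under the project root.
rootdir = os.getcwd().split('da/')[0]
analysisdir = os.path.join(rootdir, 'da/analysis')
matrix_file = os.path.join(analysisdir, 'regions.nc')

# Alternative (not in this commit): anchor on the module file itself,
# which also works when the CWD is outside the project tree.
# analysisdir = os.path.dirname(os.path.abspath(__file__))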
@@ -70,8 +70,10 @@ EnsembleSmootherPipeline(DaCycle,PlatForm, DaSystem, Samples,StateVector,ObsOper
 msg = header+"Starting analysis"+footer ; logging.info(msg)

 from da.analysis.expand_fluxes import SaveWeeklyAvg1x1Data
+from da.analysis.expand_fluxes import SaveWeeklyAvgTCData

 savedas = SaveWeeklyAvg1x1Data(DaCycle)
+savedas = SaveWeeklyAvgTCData(DaCycle)

 sys.exit(0)