Commit 9f392375 authored by Naomi Smith's avatar Naomi Smith
Browse files

Upgraded python files to Python 3. Changed regions file in...

Upgraded python files to Python 3. Changed regions file in da/rc/carbontracker_random.rc to be gridded NH file for Cartesius platform.
parent c75af4c1
......@@ -45,7 +45,7 @@ File created on 21 Ocotber 2008.
def proceed_dialog(txt, yes=['y', 'yes'], all=['a', 'all', 'yes-to-all']):
""" function to ask whether to proceed or not """
response = raw_input(txt)
response = input(txt)
if response.lower() in yes:
return 1
if response.lower() in all:
......@@ -113,7 +113,7 @@ def save_weekly_avg_1x1_data(dacycle, statevector):
fire = np.array(file.get_variable(dacycle.dasystem['background.co2.fires.flux']))
fossil = np.array(file.get_variable(dacycle.dasystem['background.co2.fossil.flux']))
#mapped_parameters = np.array(file.get_variable(dacycle.dasystem['final.param.mean.1x1']))
if dacycle.dasystem['background.co2.biosam.flux'] in file.variables.keys():
if dacycle.dasystem['background.co2.biosam.flux'] in list(file.variables.keys()):
sam = True
biosam = np.array(file.get_variable(dacycle.dasystem['background.co2.biosam.flux']))
firesam = np.array(file.get_variable(dacycle.dasystem['background.co2.firesam.flux']))
......@@ -162,7 +162,7 @@ def save_weekly_avg_1x1_data(dacycle, statevector):
#
# if prior, do not multiply fluxes with parameters, otherwise do
#
print gridensemble.shape, bio.shape, gridmean.shape
print(gridensemble.shape, bio.shape, gridmean.shape)
biomapped = bio * gridmean
oceanmapped = ocean * gridmean
biovarmapped = bio * gridensemble
......@@ -184,7 +184,7 @@ def save_weekly_avg_1x1_data(dacycle, statevector):
savedict['count'] = next
ncf.add_data(savedict)
print biovarmapped.shape
print(biovarmapped.shape)
savedict = ncf.standard_var(varname='bio_flux_%s_ensemble' % qual_short)
savedict['values'] = biovarmapped.tolist()
savedict['dims'] = dimdate + dimensemble + dimgrid
......@@ -301,7 +301,7 @@ def save_weekly_avg_state_data(dacycle, statevector):
fire = np.array(file.get_variable(dacycle.dasystem['background.co2.fires.flux']))
fossil = np.array(file.get_variable(dacycle.dasystem['background.co2.fossil.flux']))
#mapped_parameters = np.array(file.get_variable(dacycle.dasystem['final.param.mean.1x1']))
if dacycle.dasystem['background.co2.biosam.flux'] in file.variables.keys():
if dacycle.dasystem['background.co2.biosam.flux'] in list(file.variables.keys()):
sam = True
biosam = np.array(file.get_variable(dacycle.dasystem['background.co2.biosam.flux']))
firesam = np.array(file.get_variable(dacycle.dasystem['background.co2.firesam.flux']))
......@@ -555,7 +555,7 @@ def save_weekly_avg_tc_data(dacycle, statevector):
# Now convert other variables that were inside the flux_1x1 file
vardict = ncf_in.variables
for vname, vprop in vardict.iteritems():
for vname, vprop in vardict.items():
data = ncf_in.get_variable(vname)[index]
......@@ -680,7 +680,7 @@ def save_weekly_avg_ext_tc_data(dacycle):
# Now convert other variables that were inside the tcfluxes.nc file
vardict = ncf_in.variables
for vname, vprop in vardict.iteritems():
for vname, vprop in vardict.items():
data = ncf_in.get_variable(vname)[index]
......@@ -899,7 +899,7 @@ def save_weekly_avg_agg_data(dacycle, region_aggregate='olson'):
# Now convert other variables that were inside the statevector file
vardict = ncf_in.variables
for vname, vprop in vardict.iteritems():
for vname, vprop in vardict.items():
if vname == 'latitude': continue
elif vname == 'longitude': continue
elif vname == 'date': continue
......@@ -1014,7 +1014,7 @@ def save_time_avg_data(dacycle, infile, avg='monthly'):
pass
file = io.ct_read(infile, 'read')
datasets = file.variables.keys()
datasets = list(file.variables.keys())
date = file.get_variable('date')
globatts = file.ncattrs()
......@@ -1042,7 +1042,7 @@ def save_time_avg_data(dacycle, infile, avg='monthly'):
for d in vardims:
if 'date' in d:
continue
if d in ncf.dimensions.keys():
if d in list(ncf.dimensions.keys()):
pass
else:
dim = ncf.createDimension(d, size=len(file.dimensions[d]))
......@@ -1072,7 +1072,7 @@ def save_time_avg_data(dacycle, infile, avg='monthly'):
time_avg = [time_avg]
data_avg = [data_avg]
else:
raise ValueError, 'Averaging (%s) does not exist' % avg
raise ValueError('Averaging (%s) does not exist' % avg)
count = -1
for dd, data in zip(time_avg, data_avg):
......
This diff is collapsed.
......@@ -28,7 +28,7 @@ import datetime as dt
import os
import sys
import shutil
import time_avg_fluxes as tma
from . import time_avg_fluxes as tma
basedir = '/Storage/CO2/ingrid/'
basedir2 = '/Storage/CO2/peters/'
......@@ -60,12 +60,12 @@ if __name__ == "__main__":
os.makedirs(os.path.join(targetdir,'analysis','data_%s_weekly'%nam) )
timedirs=[]
for ss,vv in sources.iteritems():
for ss,vv in sources.items():
sds,eds = ss.split(' through ')
sd = dt.datetime.strptime(sds,'%Y-%m-%d')
ed = dt.datetime.strptime(eds,'%Y-%m-%d')
timedirs.append([sd,ed,vv])
print sd,ed, vv
print(sd,ed, vv)
while dacycle['time.start'] < dacycle['time.end']:
......
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# merge_ctdas_runs.py
"""
Author : peters
Revision History:
File created on 14 Jul 2014.
This script merges the analysis directory from multiple projects into one new folder.
It steps over existing analysis output files from weekly means, and then averages these to daily/monthly/yearly values.
"""
import datetime as dt
import os
import sys
import shutil
import time_avg_fluxes as tma
# Root directories of the two storage areas that hold the source projects.
basedir = '/Storage/CO2/ingrid/'
basedir2 = '/Storage/CO2/peters/'
# Name and full path of the merged (target) project.
targetproject = 'geocarbon-ei-sibcasa-gfed4-zoom-gridded-combined-convec-20011230-20130101'
targetdir = os.path.join(basedir2,targetproject)
# Maps a date range key of the form '<start> through <end>' (ISO dates) to the
# project directory that provides the analysis output for that period.
sources = {
'2000-01-01 through 2011-12-31': os.path.join(basedir,'carbontracker','geocarbon-ei-sibcasa-gfed4-zoom-gridded-convec-combined'),
'2012-01-01 through 2012-12-31': os.path.join(basedir2,'geocarbon-ei-sibcasa-gfed4-zoom-gridded-convec-20111231-20140101'),
}
# Categories of weekly analysis data to copy and average.
dirs = ['flux1x1','transcom','country','olson']
# Minimal dacycle-like dict that drives the weekly merge loop in __main__.
dacycle = {}
dacycle['time.start'] = dt.datetime(2000,12,30)
dacycle['time.end'] = dt.datetime(2013,1,1)
dacycle['cyclelength'] = dt.timedelta(days=7)
dacycle['dir.analysis'] = os.path.join(targetdir,'analysis')
if __name__ == "__main__":

    # Create the target project tree (analysis dir plus one weekly data
    # folder per category) if it does not exist yet.
    if not os.path.exists(targetdir):
        os.makedirs(targetdir)
    if not os.path.exists(os.path.join(targetdir,'analysis')):
        os.makedirs(os.path.join(targetdir,'analysis'))
    for nam in dirs:
        if not os.path.exists(os.path.join(targetdir,'analysis','data_%s_weekly'%nam)):
            os.makedirs(os.path.join(targetdir,'analysis','data_%s_weekly'%nam))

    # Parse the '<start> through <end>' keys of `sources` into
    # [start, end, dir] triples so the loop below can pick the right
    # source project for each weekly cycle.
    timedirs = []
    for ss,vv in sources.items():   # Python 3: items() replaces iteritems()
        sds,eds = ss.split(' through ')
        sd = dt.datetime.strptime(sds,'%Y-%m-%d')
        ed = dt.datetime.strptime(eds,'%Y-%m-%d')
        timedirs.append([sd,ed,vv])
        print(sd,ed, vv)

    # Walk through the weekly cycles: copy each category's weekly flux file
    # from the matching source project into the target, then update the
    # daily/monthly/yearly averages.
    while dacycle['time.start'] < dacycle['time.end']:

        # Find the source project whose period covers this cycle start.
        # NOTE(review): if no period matches, `indir` keeps its previous
        # value (or is undefined on the very first cycle) -- confirm the
        # source periods cover the full loop range.
        for td in timedirs:
            if dacycle['time.start'] >= td[0] and dacycle['time.start'] <= td[1]:
                indir = td[2]

        # Now time avg new fluxes
        infile = os.path.join(indir,'analysis','data_flux1x1_weekly','flux_1x1.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d')))
        shutil.copy(infile,infile.replace(indir,targetdir))
        tma.time_avg(dacycle,avg='flux1x1')

        infile = os.path.join(indir,'analysis','data_transcom_weekly','transcom_fluxes.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d')))
        shutil.copy(infile,infile.replace(indir,targetdir))
        tma.time_avg(dacycle,avg='transcom')

        infile = os.path.join(indir,'analysis','data_olson_weekly','olson_fluxes.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d')))
        shutil.copy(infile,infile.replace(indir,targetdir))
        tma.time_avg(dacycle,avg='olson')

        infile = os.path.join(indir,'analysis','data_country_weekly','country_fluxes.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d')))
        shutil.copy(infile,infile.replace(indir,targetdir))
        tma.time_avg(dacycle,avg='country')

        dacycle['time.start'] += dacycle['cyclelength']
......@@ -40,8 +40,8 @@ import logging
import copy
from da.analysis.summarize_obs import nice_lon, nice_lat, nice_alt
from PIL import Image
import urllib2
import StringIO
import urllib.request, urllib.error, urllib.parse
import io
"""
General data needed to set up proper aces inside a figure instance
......@@ -336,7 +336,7 @@ def timehistograms_new(fig, infile, option='final'):
# Get a scaling factor for the x-axis range. Now we will include 5 standard deviations
sc = res.std()
print 'sc',sc
print('sc',sc)
# If there is too little data for a reasonable PDF, skip to the next value in the loop
if res.shape[0] < 10: continue
......@@ -435,12 +435,12 @@ def timehistograms_new(fig, infile, option='final'):
#fig.text(0.12,0.16,str1,fontsize=0.8*fontsize,color='0.75')
try:
img = urllib2.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
img = urllib.request.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
except:
logging.warning("No logo found for this program, continuing...")
return fig
im = Image.open(StringIO.StringIO(img))
im = Image.open(io.StringIO(img))
height = im.size[1]
width = im.size[0]
......@@ -674,12 +674,12 @@ def timevssite_new(fig, infile):
#fig.text(0.12, 0.16, str1, fontsize=0.8 * fontsize, color='0.75')
try:
img = urllib2.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
img = urllib.request.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
except:
logging.warning("No logo found for this program, continuing...")
return fig
im = Image.open(StringIO.StringIO(img))
im = Image.open(io.StringIO(img))
height = im.size[1]
width = im.size[0]
......@@ -933,12 +933,12 @@ def residuals_new(fig, infile, option):
#fig.text(0.12, 0.16, str1, fontsize=0.8 * fontsize, color='0.75')
try:
img = urllib2.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
img = urllib.request.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
except:
logging.warning("No logo found for this program, continuing...")
return fig
im = Image.open(StringIO.StringIO(img))
im = Image.open(io.StringIO(img))
height = im.size[1]
width = im.size[0]
......
This diff is collapsed.
......@@ -97,9 +97,9 @@ def summarize_obs(analysisdir, printfmt='html'):
infiles = [os.path.join(mrdir, f) for f in mrfiles if f.endswith('.nc')]
if printfmt == 'tex':
print '\\begin{tabular*}{\\textheight}{l l l l r r r r}'
print 'Code & Name & Lat, Lon, Elev & Lab & N (flagged) & $\\sqrt{R}$ &Inn \\XS &Bias\\\\'
print '\hline\\\\ \n\multicolumn{8}{ c }{Semi-Continuous Surface Samples}\\\\[3pt] '
print('\\begin{tabular*}{\\textheight}{l l l l r r r r}')
print('Code & Name & Lat, Lon, Elev & Lab & N (flagged) & $\\sqrt{R}$ &Inn \\XS &Bias\\\\')
print('\hline\\\\ \n\multicolumn{8}{ c }{Semi-Continuous Surface Samples}\\\\[3pt] ')
fmt = '%8s & ' + ' %55s & ' + '%20s &' + '%6s &' + ' %4d (%d) & ' + ' %5.2f & ' + ' %5.2f & ' + '%+5.2f \\\\'
elif printfmt == 'html':
tablehead = \
......@@ -136,7 +136,7 @@ def summarize_obs(analysisdir, printfmt='html'):
<TD>%s</TD>\n \
</TR>\n"""
elif printfmt == 'scr':
print 'Code Site NObs flagged R Inn X2'
print('Code Site NObs flagged R Inn X2')
fmt = '%8s ' + ' %55s %s %s' + ' %4d ' + ' %4d ' + ' %5.2f ' + ' %5.2f'
table = []
......@@ -363,15 +363,15 @@ def make_map(analysisdir): #makes a map of amount of assimilated observations pe
ax.annotate(labs[i],xy=m(172,86-count),xycoords='data',fontweight='bold')
count = count + 4
fig.text(0.15,0.945,u'\u2022',fontsize=35,color='blue')
fig.text(0.15,0.945,'\u2022',fontsize=35,color='blue')
fig.text(0.16,0.95,': N<250',fontsize=24,color='blue')
fig.text(0.30,0.94,u'\u2022',fontsize=40,color='green')
fig.text(0.30,0.94,'\u2022',fontsize=40,color='green')
fig.text(0.31,0.95,': N<500',fontsize=24,color='green')
fig.text(0.45,0.94,u'\u2022',fontsize=45,color='orange')
fig.text(0.45,0.94,'\u2022',fontsize=45,color='orange')
fig.text(0.46,0.95,': N<750',fontsize=24,color='orange')
fig.text(0.60,0.939,u'\u2022',fontsize=50,color='brown')
fig.text(0.60,0.939,'\u2022',fontsize=50,color='brown')
fig.text(0.61,0.95,': N<1000',fontsize=24,color='brown')
fig.text(0.75,0.938,u'\u2022',fontsize=55,color='red')
fig.text(0.75,0.938,'\u2022',fontsize=55,color='red')
fig.text(0.765,0.95,': N>1000',fontsize=24,color='red')
ax.set_title('Assimilated observations',fontsize=24)
......
This diff is collapsed.
......@@ -33,12 +33,12 @@ def time_avg(dacycle,avg='transcom'):
""" Function to create a set of averaged files in a folder, needed to make longer term means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
raise IOError('Choice of averaging invalid')
analysisdir = dacycle['dir.analysis']
if not os.path.exists(analysisdir):
raise IOError,'analysis dir requested (%s) does not exist, exiting...'%analysisdir
raise IOError('analysis dir requested (%s) does not exist, exiting...'%analysisdir)
daily_avg(dacycle,avg)
......@@ -68,14 +68,14 @@ def daily_avg(dacycle,avg):
""" Function to create a set of daily files in a folder, needed to make longer term means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
raise IOError('Choice of averaging invalid')
analysisdir = dacycle['dir.analysis']
weekdir = os.path.join(analysisdir , 'data_%s_weekly'%avg)
daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)
if not os.path.exists(daydir):
print "Creating new output directory " + daydir
print("Creating new output directory " + daydir)
os.makedirs(daydir)
files = os.listdir(weekdir)
......@@ -88,7 +88,7 @@ def daily_avg(dacycle,avg):
dt = dacycle['cyclelength']
for k,v in fileinfo.iteritems():
for k,v in fileinfo.items():
cycle_file = os.path.join(weekdir,k)
for i in range(abs(dt.days)):
daily_file = os.path.join(daydir,'%s_fluxes.%s.nc'%(avg,(v+datetime.timedelta(days=i)).strftime('%Y-%m-%d')))
......@@ -100,7 +100,7 @@ def monthly_avg(dacycle,avg):
""" Function to average a set of files in a folder from daily to monthly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
raise IOError('Choice of averaging invalid')
analysisdir = dacycle['dir.analysis']
......@@ -108,7 +108,7 @@ def monthly_avg(dacycle,avg):
monthdir = os.path.join(analysisdir,'data_%s_monthly'%avg)
if not os.path.exists(monthdir):
print "Creating new output directory " + monthdir
print("Creating new output directory " + monthdir)
os.makedirs(monthdir)
......@@ -116,7 +116,7 @@ def monthly_avg(dacycle,avg):
files = [f for f in files if '-' in f and f.endswith('.nc')]
if len(files) < 28:
print 'No month is yet complete, skipping monthly average'
print('No month is yet complete, skipping monthly average')
return
fileinfo = {}
......@@ -124,8 +124,8 @@ def monthly_avg(dacycle,avg):
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')
fileinfo[filename] = date
years = [d.year for d in fileinfo.values()] # get actual years
months = set([d.month for d in fileinfo.values()]) # get actual months
years = [d.year for d in list(fileinfo.values())] # get actual years
months = set([d.month for d in list(fileinfo.values())]) # get actual months
sd = datetime.datetime(min(years),1,1)
ed = datetime.datetime(max(years)+1,1,1)
......@@ -136,7 +136,7 @@ def monthly_avg(dacycle,avg):
ndays_in_month = (nd-sd).days
avg_files = [os.path.join(daydir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]
avg_files = [os.path.join(daydir,k) for k,v in fileinfo.items() if v < nd and v >= sd]
if len(avg_files) != ndays_in_month: # only once month complete
#print 'New month (%02d) is not yet complete, skipping monthly average'%(sd.month)
......@@ -144,7 +144,7 @@ def monthly_avg(dacycle,avg):
else:
targetfile = os.path.join(monthdir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y-%m')))
if not os.path.exists(targetfile):
print "New month (%02d) is complete, I have %d days for the next file"%(sd.month,ndays_in_month)
print("New month (%02d) is complete, I have %d days for the next file"%(sd.month,ndays_in_month))
command = ['ncra','-O']+ avg_files + [targetfile]
status = subprocess.check_call(command)
else:
......@@ -156,21 +156,21 @@ def yearly_avg(dacycle,avg):
""" Function to average a set of files in a folder from monthly to yearly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
raise IOError('Choice of averaging invalid')
analysisdir = dacycle['dir.analysis']
monthdir = os.path.join(analysisdir , 'data_%s_monthly'%avg )
yeardir = os.path.join(analysisdir,'data_%s_yearly'%avg)
if not os.path.exists(yeardir):
print "Creating new output directory " + yeardir
print("Creating new output directory " + yeardir)
os.makedirs(yeardir)
files = os.listdir(monthdir) # get monthly files
files = [f for f in files if '-' in f and f.endswith('.nc')]
if not files:
print "No full year finished yet, skipping yearly average..."
print("No full year finished yet, skipping yearly average...")
return
fileinfo = {}
......@@ -178,7 +178,7 @@ def yearly_avg(dacycle,avg):
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m')
fileinfo[filename] = date
years = set([d.year for d in fileinfo.values()])
years = set([d.year for d in list(fileinfo.values())])
sd = datetime.datetime(min(years),1,1)
ed = datetime.datetime(max(years)+1,1,1)
......@@ -187,15 +187,15 @@ def yearly_avg(dacycle,avg):
nd = sd + relativedelta(years=+1)
avg_files = [os.path.join(monthdir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]
avg_files = [os.path.join(monthdir,k) for k,v in fileinfo.items() if v < nd and v >= sd]
if not len(avg_files) == 12 :
print "Year %04d not finished yet, skipping yearly average..."%sd.year
print("Year %04d not finished yet, skipping yearly average..."%sd.year)
else:
targetfile = os.path.join(yeardir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y')))
if not os.path.exists(targetfile):
print "Year %04d is complete, I have 12 months for the next file"%sd.year
print("Year %04d is complete, I have 12 months for the next file"%sd.year)
command = ['ncra','-O']+ avg_files + [targetfile]
status = subprocess.check_call(command)
......@@ -205,7 +205,7 @@ def longterm_avg(dacycle,avg):
""" Function to average a set of files in a folder from monthly to yearly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
raise IOError('Choice of averaging invalid')
analysisdir = dacycle['dir.analysis']
......@@ -213,14 +213,14 @@ def longterm_avg(dacycle,avg):
longtermdir = os.path.join(analysisdir,'data_%s_longterm'%avg)
if not os.path.exists(longtermdir):
print "Creating new output directory " + longtermdir
print("Creating new output directory " + longtermdir)
os.makedirs(longtermdir)
files = os.listdir(yeardir)
files = [f for f in files if '-' in f and f.endswith('.nc')]
if not files:
print "No full year finished yet, skipping longterm average..."
print("No full year finished yet, skipping longterm average...")
return
dates = []
......
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# time_avg_fluxes.py
"""
Author : peters
Revision History:
File created on 20 Dec 2012.
"""
import sys
sys.path.append('../../')
import os
import sys
import shutil
from dateutil.relativedelta import relativedelta
import datetime
import subprocess
def time_avg(dacycle,avg='transcom'):
    """Create the full cascade of averaged files (daily -> monthly ->
    yearly -> longterm) for one averaging category.

    Parameters:
        dacycle : dict-like; must provide 'dir.analysis' plus whatever keys
                  the downstream *_avg helpers read (e.g. 'cyclelength').
        avg     : averaging category, one of 'transcom',
                  'transcom_extended', 'olson', 'olson_extended',
                  'country', 'flux1x1'.

    Raises:
        IOError : if `avg` is not a known category, or the requested
                  analysis directory does not exist.
    """
    # Python 3: "raise IOError, msg" syntax is invalid; use a call instead.
    if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
        raise IOError('Choice of averaging invalid')

    analysisdir = dacycle['dir.analysis']

    if not os.path.exists(analysisdir):
        raise IOError('analysis dir requested (%s) does not exist, exiting...'%analysisdir)

    daily_avg(dacycle,avg)
    monthly_avg(dacycle,avg)
    yearly_avg(dacycle,avg)
    longterm_avg(dacycle,avg)
def new_month(dacycle):
    """Return True when the current cycle start falls in a different
    calendar month than the previous cycle start."""
    start = dacycle['time.start']
    previous = start - dacycle['cyclelength']
    return start.month != previous.month
def new_year(dacycle):
    """Return True when the current cycle start falls in a different
    calendar year than the previous cycle start."""
    start = dacycle['time.start']
    previous = start - dacycle['cyclelength']
    return start.year != previous.year
def daily_avg(dacycle,avg):
    """Expand the weekly flux files of one category into per-day symlinks,
    so longer-term (monthly/yearly) means can be built from daily files.

    Parameters:
        dacycle : dict-like; must provide 'dir.analysis' and 'cyclelength'
                  (a timedelta giving the cycle length in days).
        avg     : averaging category, one of 'transcom',
                  'transcom_extended', 'olson', 'olson_extended',
                  'country', 'flux1x1'.

    Raises:
        IOError : if `avg` is not a known category.
    """
    # Python 3: "raise IOError, msg" syntax is invalid; use a call instead.
    if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
        raise IOError('Choice of averaging invalid')

    analysisdir = dacycle['dir.analysis']
    weekdir = os.path.join(analysisdir, 'data_%s_weekly'%avg)
    daydir = os.path.join(analysisdir, 'data_%s_daily'%avg)

    if not os.path.exists(daydir):
        print("Creating new output directory " + daydir)
        os.makedirs(daydir)

    # Only dated NetCDF files count as weekly output.
    files = os.listdir(weekdir)
    files = [f for f in files if '-' in f and f.endswith('.nc')]

    # Map each weekly file to the start date encoded in its name
    # ('<avg>_fluxes.YYYY-MM-DD.nc').
    fileinfo = {}
    for filename in files:
        date = datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')
        fileinfo[filename] = date

    dt = dacycle['cyclelength']

    # One symlink per day of the cycle, all pointing at the weekly file;
    # lexists() also detects dangling links so we never re-link over one.
    for k,v in fileinfo.items():
        cycle_file = os.path.join(weekdir,k)
        for i in range(abs(dt.days)):
            daily_file = os.path.join(daydir,'%s_fluxes.%s.nc'%(avg,(v+datetime.timedelta(days=i)).strftime('%Y-%m-%d')))
            if not os.path.lexists(daily_file):
                os.symlink(cycle_file,daily_file)
def monthly_avg(dacycle,avg):
""" Function to average a set of files in a folder from daily to monthly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'