Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (2)
Showing changes with 4154 additions and 102 deletions
File added
......@@ -45,7 +45,7 @@ File created on 21 Ocotber 2008.
def proceed_dialog(txt, yes=['y', 'yes'], all=['a', 'all', 'yes-to-all']):
""" function to ask whether to proceed or not """
response = raw_input(txt)
response = input(txt)
if response.lower() in yes:
return 1
if response.lower() in all:
......@@ -113,7 +113,7 @@ def save_weekly_avg_1x1_data(dacycle, statevector):
fire = np.array(file.get_variable(dacycle.dasystem['background.co2.fires.flux']))
fossil = np.array(file.get_variable(dacycle.dasystem['background.co2.fossil.flux']))
#mapped_parameters = np.array(file.get_variable(dacycle.dasystem['final.param.mean.1x1']))
if dacycle.dasystem['background.co2.biosam.flux'] in file.variables.keys():
if dacycle.dasystem['background.co2.biosam.flux'] in list(file.variables.keys()):
sam = True
biosam = np.array(file.get_variable(dacycle.dasystem['background.co2.biosam.flux']))
firesam = np.array(file.get_variable(dacycle.dasystem['background.co2.firesam.flux']))
......@@ -162,7 +162,7 @@ def save_weekly_avg_1x1_data(dacycle, statevector):
#
# if prior, do not multiply fluxes with parameters, otherwise do
#
print gridensemble.shape, bio.shape, gridmean.shape
print(gridensemble.shape, bio.shape, gridmean.shape)
biomapped = bio * gridmean
oceanmapped = ocean * gridmean
biovarmapped = bio * gridensemble
......@@ -184,7 +184,7 @@ def save_weekly_avg_1x1_data(dacycle, statevector):
savedict['count'] = next
ncf.add_data(savedict)
print biovarmapped.shape
print(biovarmapped.shape)
savedict = ncf.standard_var(varname='bio_flux_%s_ensemble' % qual_short)
savedict['values'] = biovarmapped.tolist()
savedict['dims'] = dimdate + dimensemble + dimgrid
......@@ -301,7 +301,7 @@ def save_weekly_avg_state_data(dacycle, statevector):
fire = np.array(file.get_variable(dacycle.dasystem['background.co2.fires.flux']))
fossil = np.array(file.get_variable(dacycle.dasystem['background.co2.fossil.flux']))
#mapped_parameters = np.array(file.get_variable(dacycle.dasystem['final.param.mean.1x1']))
if dacycle.dasystem['background.co2.biosam.flux'] in file.variables.keys():
if dacycle.dasystem['background.co2.biosam.flux'] in list(file.variables.keys()):
sam = True
biosam = np.array(file.get_variable(dacycle.dasystem['background.co2.biosam.flux']))
firesam = np.array(file.get_variable(dacycle.dasystem['background.co2.firesam.flux']))
......@@ -555,7 +555,7 @@ def save_weekly_avg_tc_data(dacycle, statevector):
# Now convert other variables that were inside the flux_1x1 file
vardict = ncf_in.variables
for vname, vprop in vardict.iteritems():
for vname, vprop in vardict.items():
data = ncf_in.get_variable(vname)[index]
......@@ -680,7 +680,7 @@ def save_weekly_avg_ext_tc_data(dacycle):
# Now convert other variables that were inside the tcfluxes.nc file
vardict = ncf_in.variables
for vname, vprop in vardict.iteritems():
for vname, vprop in vardict.items():
data = ncf_in.get_variable(vname)[index]
......@@ -899,7 +899,7 @@ def save_weekly_avg_agg_data(dacycle, region_aggregate='olson'):
# Now convert other variables that were inside the statevector file
vardict = ncf_in.variables
for vname, vprop in vardict.iteritems():
for vname, vprop in vardict.items():
if vname == 'latitude': continue
elif vname == 'longitude': continue
elif vname == 'date': continue
......@@ -1014,7 +1014,7 @@ def save_time_avg_data(dacycle, infile, avg='monthly'):
pass
file = io.ct_read(infile, 'read')
datasets = file.variables.keys()
datasets = list(file.variables.keys())
date = file.get_variable('date')
globatts = file.ncattrs()
......@@ -1042,7 +1042,7 @@ def save_time_avg_data(dacycle, infile, avg='monthly'):
for d in vardims:
if 'date' in d:
continue
if d in ncf.dimensions.keys():
if d in list(ncf.dimensions.keys()):
pass
else:
dim = ncf.createDimension(d, size=len(file.dimensions[d]))
......@@ -1072,7 +1072,7 @@ def save_time_avg_data(dacycle, infile, avg='monthly'):
time_avg = [time_avg]
data_avg = [data_avg]
else:
raise ValueError, 'Averaging (%s) does not exist' % avg
raise ValueError('Averaging (%s) does not exist' % avg)
count = -1
for dd, data in zip(time_avg, data_avg):
......
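The hunks above (and most of the remaining changes in this comparison) apply a routine Python 2 to Python 3 port. The following is a minimal, self-contained illustration of the idioms being swapped in; it is not taken from the repository, and the dictionary contents are made up.

# Illustration only: the Python 3 idioms used in the hunks above.
fluxes = {'bio_flux': 1.0, 'ocean_flux': 2.0}   # hypothetical data

# print is a function in Python 3 (it was a statement in Python 2)
print(len(fluxes), 'flux fields')

# dict.iteritems() no longer exists; dict.items() returns an iterable view
for name, value in fluxes.items():
    print(name, value)

# dict.keys() is a view, so wrap it in list() only where a real list is required
names = list(fluxes.keys())

# raw_input() was renamed to input()
# response = input('continue? ')   # commented out so the sketch runs non-interactively

# the Python 2 form  raise ValueError, 'msg'  becomes an ordinary call
if not names:
    raise ValueError('Averaging (%s) does not exist' % 'monthly')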
This diff is collapsed.
......@@ -28,7 +28,7 @@ import datetime as dt
import os
import sys
import shutil
import time_avg_fluxes as tma
from . import time_avg_fluxes as tma
basedir = '/Storage/CO2/ingrid/'
basedir2 = '/Storage/CO2/peters/'
......@@ -60,12 +60,12 @@ if __name__ == "__main__":
os.makedirs(os.path.join(targetdir,'analysis','data_%s_weekly'%nam) )
timedirs=[]
for ss,vv in sources.iteritems():
for ss,vv in sources.items():
sds,eds = ss.split(' through ')
sd = dt.datetime.strptime(sds,'%Y-%m-%d')
ed = dt.datetime.strptime(eds,'%Y-%m-%d')
timedirs.append([sd,ed,vv])
print sd,ed, vv
print(sd,ed, vv)
while dacycle['time.start'] < dacycle['time.end']:
......
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# merge_ctdas_runs.py
"""
Author : peters
Revision History:
File created on 14 Jul 2014.
This script merges the analysis directory from multiple projects into one new folder.
It steps over existing analysis output files from weekly means, and then averages these to daily/monthly/yearly values.
"""
import datetime as dt
import os
import sys
import shutil
import time_avg_fluxes as tma
basedir = '/Storage/CO2/ingrid/'
basedir2 = '/Storage/CO2/peters/'
targetproject = 'geocarbon-ei-sibcasa-gfed4-zoom-gridded-combined-convec-20011230-20130101'
targetdir = os.path.join(basedir2,targetproject)
sources = {
'2000-01-01 through 2011-12-31': os.path.join(basedir,'carbontracker','geocarbon-ei-sibcasa-gfed4-zoom-gridded-convec-combined'),
'2012-01-01 through 2012-12-31': os.path.join(basedir2,'geocarbon-ei-sibcasa-gfed4-zoom-gridded-convec-20111231-20140101'),
}
dirs = ['flux1x1','transcom','country','olson']
dacycle = {}
dacycle['time.start'] = dt.datetime(2000,12,30)
dacycle['time.end'] = dt.datetime(2013,1,1)
dacycle['cyclelength'] = dt.timedelta(days=7)
dacycle['dir.analysis'] = os.path.join(targetdir,'analysis')
if __name__ == "__main__":
if not os.path.exists(targetdir):
os.makedirs(targetdir)
if not os.path.exists(os.path.join(targetdir,'analysis')):
os.makedirs(os.path.join(targetdir,'analysis') )
for nam in dirs:
if not os.path.exists(os.path.join(targetdir,'analysis','data_%s_weekly'%nam)):
os.makedirs(os.path.join(targetdir,'analysis','data_%s_weekly'%nam) )
timedirs=[]
for ss,vv in sources.iteritems():
sds,eds = ss.split(' through ')
sd = dt.datetime.strptime(sds,'%Y-%m-%d')
ed = dt.datetime.strptime(eds,'%Y-%m-%d')
timedirs.append([sd,ed,vv])
print sd,ed, vv
while dacycle['time.start'] < dacycle['time.end']:
# copy the weekly flux1x1 file from the original dir to the new project dir
for td in timedirs:
if dacycle['time.start'] >= td[0] and dacycle['time.start'] <= td[1]:
indir=td[2]
# Now time avg new fluxes
infile = os.path.join(indir,'analysis','data_flux1x1_weekly','flux_1x1.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d') ) )
#print os.path.exists(infile),infile
shutil.copy(infile,infile.replace(indir,targetdir) )
tma.time_avg(dacycle,avg='flux1x1')
infile = os.path.join(indir,'analysis','data_transcom_weekly','transcom_fluxes.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d') ) )
#print os.path.exists(infile),infile
shutil.copy(infile,infile.replace(indir,targetdir) )
tma.time_avg(dacycle,avg='transcom')
infile = os.path.join(indir,'analysis','data_olson_weekly','olson_fluxes.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d') ) )
#print os.path.exists(infile),infile
shutil.copy(infile,infile.replace(indir,targetdir) )
tma.time_avg(dacycle,avg='olson')
infile = os.path.join(indir,'analysis','data_country_weekly','country_fluxes.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d') ) )
#print os.path.exists(infile),infile
shutil.copy(infile,infile.replace(indir,targetdir) )
tma.time_avg(dacycle,avg='country')
dacycle['time.start'] += dacycle['cyclelength']
......@@ -40,8 +40,8 @@ import logging
import copy
from da.analysis.summarize_obs import nice_lon, nice_lat, nice_alt
from PIL import Image
import urllib2
import StringIO
import urllib.request, urllib.error, urllib.parse
import io
"""
General data needed to set up proper axes inside a figure instance
......@@ -336,7 +336,7 @@ def timehistograms_new(fig, infile, option='final'):
# Get a scaling factor for the x-axis range. Now we will include 5 standard deviations
sc = res.std()
print 'sc',sc
print('sc',sc)
# If there is too little data for a reasonable PDF, skip to the next value in the loop
if res.shape[0] < 10: continue
......@@ -435,12 +435,12 @@ def timehistograms_new(fig, infile, option='final'):
#fig.text(0.12,0.16,str1,fontsize=0.8*fontsize,color='0.75')
try:
img = urllib2.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
img = urllib.request.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
except:
logging.warning("No logo found for this program, continuing...")
return fig
im = Image.open(StringIO.StringIO(img))
im = Image.open(io.StringIO(img))
height = im.size[1]
width = im.size[0]
......@@ -674,12 +674,12 @@ def timevssite_new(fig, infile):
#fig.text(0.12, 0.16, str1, fontsize=0.8 * fontsize, color='0.75')
try:
img = urllib2.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
img = urllib.request.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
except:
logging.warning("No logo found for this program, continuing...")
return fig
im = Image.open(StringIO.StringIO(img))
im = Image.open(io.StringIO(img))
height = im.size[1]
width = im.size[0]
......@@ -933,12 +933,12 @@ def residuals_new(fig, infile, option):
#fig.text(0.12, 0.16, str1, fontsize=0.8 * fontsize, color='0.75')
try:
img = urllib2.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
img = urllib.request.urlopen('http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/'+SDSInfo['lab_1_logo']).read()
except:
logging.warning("No logo found for this program, continuing...")
return fig
im = Image.open(StringIO.StringIO(img))
im = Image.open(io.StringIO(img))
height = im.size[1]
width = im.size[0]
......
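One caveat with the conversion in this file: urllib.request.urlopen(...).read() returns bytes in Python 3, while io.StringIO accepts only str, so feeding the downloaded logo into io.StringIO would raise a TypeError before PIL ever sees it. A hedged sketch of the binary-safe variant, using a hypothetical logo filename with the base URL from the hunks above:

import io
import urllib.request
from PIL import Image

# Hypothetical logo name; the base URL is the one used in the hunks above.
url = 'http://www.esrl.noaa.gov/gmd/webdata/ccgg/ObsPack/images/logos/example.png'
img = urllib.request.urlopen(url).read()   # bytes in Python 3
im = Image.open(io.BytesIO(img))           # io.StringIO(img) would raise TypeError here
width, height = im.size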
This diff is collapsed.
......@@ -97,9 +97,9 @@ def summarize_obs(analysisdir, printfmt='html'):
infiles = [os.path.join(mrdir, f) for f in mrfiles if f.endswith('.nc')]
if printfmt == 'tex':
print '\\begin{tabular*}{\\textheight}{l l l l r r r r}'
print 'Code & Name & Lat, Lon, Elev & Lab & N (flagged) & $\\sqrt{R}$ &Inn \\XS &Bias\\\\'
print '\hline\\\\ \n\multicolumn{8}{ c }{Semi-Continuous Surface Samples}\\\\[3pt] '
print('\\begin{tabular*}{\\textheight}{l l l l r r r r}')
print('Code & Name & Lat, Lon, Elev & Lab & N (flagged) & $\\sqrt{R}$ &Inn \\XS &Bias\\\\')
print('\hline\\\\ \n\multicolumn{8}{ c }{Semi-Continuous Surface Samples}\\\\[3pt] ')
fmt = '%8s & ' + ' %55s & ' + '%20s &' + '%6s &' + ' %4d (%d) & ' + ' %5.2f & ' + ' %5.2f & ' + '%+5.2f \\\\'
elif printfmt == 'html':
tablehead = \
......@@ -136,7 +136,7 @@ def summarize_obs(analysisdir, printfmt='html'):
<TD>%s</TD>\n \
</TR>\n"""
elif printfmt == 'scr':
print 'Code Site NObs flagged R Inn X2'
print('Code Site NObs flagged R Inn X2')
fmt = '%8s ' + ' %55s %s %s' + ' %4d ' + ' %4d ' + ' %5.2f ' + ' %5.2f'
table = []
......@@ -363,15 +363,15 @@ def make_map(analysisdir): #makes a map of amount of assimilated observations pe
ax.annotate(labs[i],xy=m(172,86-count),xycoords='data',fontweight='bold')
count = count + 4
fig.text(0.15,0.945,u'\u2022',fontsize=35,color='blue')
fig.text(0.15,0.945,'\u2022',fontsize=35,color='blue')
fig.text(0.16,0.95,': N<250',fontsize=24,color='blue')
fig.text(0.30,0.94,u'\u2022',fontsize=40,color='green')
fig.text(0.30,0.94,'\u2022',fontsize=40,color='green')
fig.text(0.31,0.95,': N<500',fontsize=24,color='green')
fig.text(0.45,0.94,u'\u2022',fontsize=45,color='orange')
fig.text(0.45,0.94,'\u2022',fontsize=45,color='orange')
fig.text(0.46,0.95,': N<750',fontsize=24,color='orange')
fig.text(0.60,0.939,u'\u2022',fontsize=50,color='brown')
fig.text(0.60,0.939,'\u2022',fontsize=50,color='brown')
fig.text(0.61,0.95,': N<1000',fontsize=24,color='brown')
fig.text(0.75,0.938,u'\u2022',fontsize=55,color='red')
fig.text(0.75,0.938,'\u2022',fontsize=55,color='red')
fig.text(0.765,0.95,': N>1000',fontsize=24,color='red')
ax.set_title('Assimilated observations',fontsize=24)
......
This diff is collapsed.
......@@ -33,12 +33,12 @@ def time_avg(dacycle,avg='transcom'):
""" Function to create a set of averaged files in a folder, needed to make longer term means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
raise IOError('Choice of averaging invalid')
analysisdir = dacycle['dir.analysis']
if not os.path.exists(analysisdir):
raise IOError,'analysis dir requested (%s) does not exist, exiting...'%analysisdir
raise IOError('analysis dir requested (%s) does not exist, exiting...'%analysisdir)
daily_avg(dacycle,avg)
......@@ -68,14 +68,14 @@ def daily_avg(dacycle,avg):
""" Function to create a set of daily files in a folder, needed to make longer term means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
raise IOError('Choice of averaging invalid')
analysisdir = dacycle['dir.analysis']
weekdir = os.path.join(analysisdir , 'data_%s_weekly'%avg)
daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)
if not os.path.exists(daydir):
print "Creating new output directory " + daydir
print("Creating new output directory " + daydir)
os.makedirs(daydir)
files = os.listdir(weekdir)
......@@ -88,7 +88,7 @@ def daily_avg(dacycle,avg):
dt = dacycle['cyclelength']
for k,v in fileinfo.iteritems():
for k,v in fileinfo.items():
cycle_file = os.path.join(weekdir,k)
for i in range(abs(dt.days)):
daily_file = os.path.join(daydir,'%s_fluxes.%s.nc'%(avg,(v+datetime.timedelta(days=i)).strftime('%Y-%m-%d')))
......@@ -100,7 +100,7 @@ def monthly_avg(dacycle,avg):
""" Function to average a set of files in a folder from daily to monthly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
raise IOError('Choice of averaging invalid')
analysisdir = dacycle['dir.analysis']
......@@ -108,7 +108,7 @@ def monthly_avg(dacycle,avg):
monthdir = os.path.join(analysisdir,'data_%s_monthly'%avg)
if not os.path.exists(monthdir):
print "Creating new output directory " + monthdir
print("Creating new output directory " + monthdir)
os.makedirs(monthdir)
......@@ -116,7 +116,7 @@ def monthly_avg(dacycle,avg):
files = [f for f in files if '-' in f and f.endswith('.nc')]
if len(files) < 28:
print 'No month is yet complete, skipping monthly average'
print('No month is yet complete, skipping monthly average')
return
fileinfo = {}
......@@ -124,8 +124,8 @@ def monthly_avg(dacycle,avg):
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')
fileinfo[filename] = date
years = [d.year for d in fileinfo.values()] # get actual years
months = set([d.month for d in fileinfo.values()]) # get actual months
years = [d.year for d in list(fileinfo.values())] # get actual years
months = set([d.month for d in list(fileinfo.values())]) # get actual months
sd = datetime.datetime(min(years),1,1)
ed = datetime.datetime(max(years)+1,1,1)
......@@ -136,7 +136,7 @@ def monthly_avg(dacycle,avg):
ndays_in_month = (nd-sd).days
avg_files = [os.path.join(daydir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]
avg_files = [os.path.join(daydir,k) for k,v in fileinfo.items() if v < nd and v >= sd]
if len(avg_files) != ndays_in_month: # only once month complete
#print 'New month (%02d) is not yet complete, skipping monthly average'%(sd.month)
......@@ -144,7 +144,7 @@ def monthly_avg(dacycle,avg):
else:
targetfile = os.path.join(monthdir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y-%m')))
if not os.path.exists(targetfile):
print "New month (%02d) is complete, I have %d days for the next file"%(sd.month,ndays_in_month)
print("New month (%02d) is complete, I have %d days for the next file"%(sd.month,ndays_in_month))
command = ['ncra','-O']+ avg_files + [targetfile]
status = subprocess.check_call(command)
else:
......@@ -156,21 +156,21 @@ def yearly_avg(dacycle,avg):
""" Function to average a set of files in a folder from monthly to yearly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
raise IOError('Choice of averaging invalid')
analysisdir = dacycle['dir.analysis']
monthdir = os.path.join(analysisdir , 'data_%s_monthly'%avg )
yeardir = os.path.join(analysisdir,'data_%s_yearly'%avg)
if not os.path.exists(yeardir):
print "Creating new output directory " + yeardir
print("Creating new output directory " + yeardir)
os.makedirs(yeardir)
files = os.listdir(monthdir) # get monthly files
files = [f for f in files if '-' in f and f.endswith('.nc')]
if not files:
print "No full year finished yet, skipping yearly average..."
print("No full year finished yet, skipping yearly average...")
return
fileinfo = {}
......@@ -178,7 +178,7 @@ def yearly_avg(dacycle,avg):
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m')
fileinfo[filename] = date
years = set([d.year for d in fileinfo.values()])
years = set([d.year for d in list(fileinfo.values())])
sd = datetime.datetime(min(years),1,1)
ed = datetime.datetime(max(years)+1,1,1)
......@@ -187,15 +187,15 @@ def yearly_avg(dacycle,avg):
nd = sd + relativedelta(years=+1)
avg_files = [os.path.join(monthdir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]
avg_files = [os.path.join(monthdir,k) for k,v in fileinfo.items() if v < nd and v >= sd]
if not len(avg_files) == 12 :
print "Year %04d not finished yet, skipping yearly average..."%sd.year
print("Year %04d not finished yet, skipping yearly average..."%sd.year)
else:
targetfile = os.path.join(yeardir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y')))
if not os.path.exists(targetfile):
print "Year %04d is complete, I have 12 months for the next file"%sd.year
print("Year %04d is complete, I have 12 months for the next file"%sd.year)
command = ['ncra','-O']+ avg_files + [targetfile]
status = subprocess.check_call(command)
......@@ -205,7 +205,7 @@ def longterm_avg(dacycle,avg):
""" Function to average a set of files in a folder from monthly to yearly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
raise IOError('Choice of averaging invalid')
analysisdir = dacycle['dir.analysis']
......@@ -213,14 +213,14 @@ def longterm_avg(dacycle,avg):
longtermdir = os.path.join(analysisdir,'data_%s_longterm'%avg)
if not os.path.exists(longtermdir):
print "Creating new output directory " + longtermdir
print("Creating new output directory " + longtermdir)
os.makedirs(longtermdir)
files = os.listdir(yeardir)
files = [f for f in files if '-' in f and f.endswith('.nc')]
if not files:
print "No full year finished yet, skipping longterm average..."
print("No full year finished yet, skipping longterm average...")
return
dates = []
......
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# time_avg_fluxes.py
"""
Author : peters
Revision History:
File created on 20 Dec 2012.
"""
import sys
sys.path.append('../../')
import os
import sys
import shutil
from dateutil.relativedelta import relativedelta
import datetime
import subprocess
def time_avg(dacycle,avg='transcom'):
""" Function to create a set of averaged files in a folder, needed to make longer term means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
if not os.path.exists(analysisdir):
raise IOError,'analysis dir requested (%s) does not exist, exiting...'%analysisdir
daily_avg(dacycle,avg)
monthly_avg(dacycle,avg)
yearly_avg(dacycle,avg)
longterm_avg(dacycle,avg)
def new_month(dacycle):
""" check whether we just entered a new month"""
this_month = dacycle['time.start'].month
prev_month = (dacycle['time.start']-dacycle['cyclelength']).month
return (this_month != prev_month)
def new_year(dacycle):
""" check whether we just entered a new year"""
this_year = dacycle['time.start'].year
prev_year = (dacycle['time.start']-dacycle['cyclelength']).year
return (this_year != prev_year)
def daily_avg(dacycle,avg):
""" Function to create a set of daily files in a folder, needed to make longer term means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
weekdir = os.path.join(analysisdir , 'data_%s_weekly'%avg)
daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)
if not os.path.exists(daydir):
print "Creating new output directory " + daydir
os.makedirs(daydir)
files = os.listdir(weekdir)
files = [f for f in files if '-' in f and f.endswith('.nc')]
fileinfo = {}
for filename in files:
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')
fileinfo[filename] = date
dt = dacycle['cyclelength']
for k,v in fileinfo.iteritems():
cycle_file = os.path.join(weekdir,k)
for i in range(abs(dt.days)):
daily_file = os.path.join(daydir,'%s_fluxes.%s.nc'%(avg,(v+datetime.timedelta(days=i)).strftime('%Y-%m-%d')))
if not os.path.lexists(daily_file):
os.symlink(cycle_file,daily_file)
#print daily_file,cycle_file
def monthly_avg(dacycle,avg):
""" Function to average a set of files in a folder from daily to monthly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)
monthdir = os.path.join(analysisdir,'data_%s_monthly'%avg)
if not os.path.exists(monthdir):
print "Creating new output directory " + monthdir
os.makedirs(monthdir)
files = os.listdir(daydir) # get daily files
files = [f for f in files if '-' in f and f.endswith('.nc')]
if len(files) < 28:
print 'No month is yet complete, skipping monthly average'
return
fileinfo = {}
for filename in files: # parse date from each of them
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')
fileinfo[filename] = date
years = [d.year for d in fileinfo.values()] # get actual years
months = set([d.month for d in fileinfo.values()]) # get actual months
sd = datetime.datetime(min(years),1,1)
ed = datetime.datetime(max(years)+1,1,1)
while sd < ed:
nd = sd + relativedelta(months=+1)
ndays_in_month = (nd-sd).days
avg_files = [os.path.join(daydir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]
if len(avg_files) != ndays_in_month: # only once month complete
#print 'New month (%02d) is not yet complete, skipping monthly average'%(sd.month)
pass
else:
targetfile = os.path.join(monthdir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y-%m')))
if not os.path.exists(targetfile):
print "New month (%02d) is complete, I have %d days for the next file"%(sd.month,ndays_in_month)
command = ['ncra','-O']+ avg_files + [targetfile]
status = subprocess.check_call(command)
else:
pass
sd = nd
def yearly_avg(dacycle,avg):
""" Function to average a set of files in a folder from monthly to yearly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
monthdir = os.path.join(analysisdir , 'data_%s_monthly'%avg )
yeardir = os.path.join(analysisdir,'data_%s_yearly'%avg)
if not os.path.exists(yeardir):
print "Creating new output directory " + yeardir
os.makedirs(yeardir)
files = os.listdir(monthdir) # get monthly files
files = [f for f in files if '-' in f and f.endswith('.nc')]
if not files:
print "No full year finished yet, skipping yearly average..."
return
fileinfo = {}
for filename in files:
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m')
fileinfo[filename] = date
years = set([d.year for d in fileinfo.values()])
sd = datetime.datetime(min(years),1,1)
ed = datetime.datetime(max(years)+1,1,1)
while sd < ed:
nd = sd + relativedelta(years=+1)
avg_files = [os.path.join(monthdir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]
if not len(avg_files) == 12 :
print "Year %04d not finished yet, skipping yearly average..."%sd.year
else:
targetfile = os.path.join(yeardir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y')))
if not os.path.exists(targetfile):
print "Year %04d is complete, I have 12 months for the next file"%sd.year
command = ['ncra','-O']+ avg_files + [targetfile]
status = subprocess.check_call(command)
sd = nd
def longterm_avg(dacycle,avg):
""" Function to average a set of files in a folder from monthly to yearly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
yeardir = os.path.join(analysisdir , 'data_%s_yearly'%avg )
longtermdir = os.path.join(analysisdir,'data_%s_longterm'%avg)
if not os.path.exists(longtermdir):
print "Creating new output directory " + longtermdir
os.makedirs(longtermdir)
files = os.listdir(yeardir)
files = [f for f in files if '-' in f and f.endswith('.nc')]
if not files:
print "No full year finished yet, skipping longterm average..."
return
dates = []
for filename in files:
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y')
dates.append( date )
avg_files = [os.path.join(yeardir,k) for k in files]
if len(avg_files) > 0 :
command = ['ncra','-O']+ avg_files + [os.path.join(longtermdir,'%s_fluxes.%04d-%04d.nc'%(avg,min(dates).year, max(dates).year))]
status = subprocess.check_call(command)
if __name__ == "__main__":
from da.tools.initexit import CycleControl
sys.path.append('../../')
dacycle = CycleControl(args={'rc':'../../ctdas-ei-nobcb-zoom-ecoregions.rc'})
dacycle.setup()
dacycle.parse_times()
while dacycle['time.end'] < dacycle['time.finish']:
time_avg(dacycle,avg='flux1x1')
time_avg(dacycle,avg='transcom')
time_avg(dacycle,avg='transcom_extended')
time_avg(dacycle,avg='olson')
time_avg(dacycle,avg='olson_extended')
time_avg(dacycle,avg='country')
dacycle.advance_cycle_times()
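As the __main__ block above shows, time_avg() needs little more than a mapping with the analysis directory and the cycle length. Below is a minimal sketch of running the weekly-to-long-term averaging chain by hand; the path is a placeholder, the import form depends on where the module lives, and the NCO tool ncra must be on the PATH because the monthly and yearly steps shell out to it.

import datetime as dt
import time_avg_fluxes as tma   # adjust the import to match the package layout

# Placeholder directory: it must already contain data_<avg>_weekly/<avg>_fluxes.YYYY-MM-DD.nc files.
dacycle = {
    'dir.analysis': '/Storage/CO2/example/analysis',
    'cyclelength': dt.timedelta(days=7),
}

# One call walks the whole chain: weekly files -> daily symlinks -> monthly -> yearly -> long-term.
tma.time_avg(dacycle, avg='transcom')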
......@@ -35,7 +35,7 @@ by oceans, sea, or open water. The aggregation will thus work best on arrays tha
"""
import sys
import cPickle
import pickle
import os
sys.path.append('../../')
rootdir = os.getcwd().split('da/')[0]
......@@ -45,9 +45,9 @@ from numpy import sum, array
try:
from dbfpy import dbf
except:
print "the python DBF lib might be needed, please install from:"
print "http://dbfpy.sourceforge.net/"
print " Trying to complete anyway..."
print("the python DBF lib might be needed, please install from:")
print("http://dbfpy.sourceforge.net/")
print(" Trying to complete anyway...")
sys.path.append('../../')
from da.analysis.tools_regions import globarea
......@@ -154,7 +154,7 @@ def get_countrydict():
file = os.path.join(analysisdir,'country_dictionary.dat')
try:
countrydict = cPickle.load(open(file, 'rb'))
countrydict = pickle.load(open(file, 'rb'))
except:
db = dbf.Dbf(os.path.join(analysisdir,'GRIDCTRY.DBF'))
......@@ -194,7 +194,7 @@ def get_countrydict():
db.close()
cPickle.dump(countrydict, open(file, 'wb'), -1)
pickle.dump(countrydict, open(file, 'wb'), -1)
return countrydict
......@@ -205,24 +205,24 @@ if __name__ == "__main__":
area = globarea()
areas = []
for k, v in countrydict.items():
for k, v in list(countrydict.items()):
ar = v.agg_1x1(area) / 1.e6
areas.append((ar, k))
areas.sort()
areas.reverse()
for a in areas: print a
for a in areas: print(a)
v = countrydict['Ocean']
print v.agg_1x1(area)
print(v.agg_1x1(area))
v = countrydict['Netherlands']
print v.agg_1x1(area)
print(v.agg_1x1(area))
v = countrydict['Slovakia']
print v.agg_1x1(area)
print(v.agg_1x1(area))
v = countrydict['Czech Republic']
print v.agg_1x1(area)
print(v.agg_1x1(area))
v = countrydict['Czechoslovakia']
print v.agg_1x1(area)
print(v.agg_1x1(area))
......
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# tools_country.py
"""
Author : peters
Revision History:
File created on 25 Jul 2008.
This module provides an interface to go from arrays of gridded data to aggregates for countries.
It uses the country information from the UNEP database, and the database created by them for this purpose. See:
http://na.unep.net/metadata/unep/GRID/GRIDCTRY.html
The module sets up a class 'countryinfo' that holds a number of attributes. These attributes can be used to
extract gridded information about the country. A built-in method agg_1x1 is provided for convenience.
The routine get_countrydict() creates a dictionary with this information for a large number of countries.
CAUTION:
The country data only covers the land areas of a nation and aggregation will exclude the fraction of a land covered
by oceans, sea, or open water. The aggregation will thus work best on arrays that are *not* defined by unit area!
"""
import sys
import cPickle
import os
sys.path.append('../../')
rootdir = os.getcwd().split('da/')[0]
analysisdir = os.path.join(rootdir, 'da/analysis')
from numpy import sum, array
try:
from dbfpy import dbf
except:
print "the python DBF lib might be needed, please install from:"
print "http://dbfpy.sourceforge.net/"
print " Trying to complete anyway..."
sys.path.append('../../')
from da.analysis.tools_regions import globarea
EU25 = ["Austria", "Belgium", "Cyprus", "Czech Republic", "Denmark", "Estonia", "Finland", "France", "Germany", "Greece", "Hungary", "Ireland", "Italy", "Latvia", "Lithuania", "Luxembourg", "Malta", "Netherlands", "Poland", "Portugal", "Slovakia", "Slovenia", "Spain", "Sweden", "United Kingdom"]
EU27 = ["Austria", "Belgium", "Bulgaria", "Cyprus", "Czech Republic", "Denmark", "Estonia", "Finland", "France", "Germany", "Greece", "Hungary", "Ireland", "Italy", "Latvia", "Lithuania", "Luxembourg", "Malta", "Netherlands", "Poland", "Portugal", "Romania", "Slovakia", "Slovenia", "Spain", "Sweden", "United Kingdom"]
G8 = [ "France", "Germany", "Italy", "Japan", "United Kingdom", "United States"]
annex1 = [ "Australia", "Austria", "Belarus", "Belgium", "Bulgaria", "Canada", "Croatia", "Czech Republic", "Denmark", "European Union", "Estonia", "Finland", "France", "Germany", "Greece", "Hungary", "Iceland", "Ireland", "Italy", "Japan", "Latvia", "Liechtenstein", "Lithuania", "Luxembourg", "Monaco", "Netherlands", "New Zealand", "Norway", "Poland", "Portugal", "Romania", "Russia", "Slovakia", "Slovenia", "Spain", "Sweden", "Switzerland", "Turkey", "Ukraine", "United Kingdom", "United States"]
annex2 = [ "Australia", "Austria", "Belgium", "Canada", "Denmark", "Finland", "France", "Germany", "Greece", "Iceland", "Ireland", "Italy", "Japan", "Luxembourg", "Netherlands", "New Zealand", "Norway", "Portugal", "Spain", "Sweden", "Switzerland", "Turkey", "United Kingdom", "United States"]
class countryinfo(object):
""" Defines the information about 1x1 gridboxes covering each country """
def __init__(self, code):
self.country = code
self.ngrids = 0
self.gridij = []
self.gridnr = []
self.gridcoords = []
self.gridlandmask = []
self.gridsharedocean = []
self.gridsharedborder = []
def add_gridinfo(self, grid_i, grid_j, gridlandmask, shared_border, shared_water):
""" add information from one gridbox to the object """
self.gridij.append((grid_j, grid_i,)) # add tuple with j,i coordinates
lon = -180 + grid_i + 0.5
lat = -90 + grid_j + 0.5
self.gridcoords.append((lat, lon,)) # add tuple with lon,lat coordinates
self.gridnr.append(grid_j * 360 + grid_i) # add grid number for take() function
self.gridlandmask.append(gridlandmask) # this gives the total fraction of land
self.gridsharedocean.append(shared_water)
self.gridsharedborder.append(shared_border)
self.ngrids = self.ngrids + 1
def agg_1x1(self, field):
""" aggregate a 1x1 input field to country total """
#print field.take(self.gridnr)
#print self.gridlandmask
return (field.take(self.gridnr) * array(self.gridlandmask)).sum()
def __str__(self):
return ' Country name : %s\n' % self.country + \
' Number of gridpoints : %d ' % self.ngrids
#' Gridpoint indices : %s ' % self.gridnr
def fix_eu(rec):
""" fix Czech Republic and Slovakia manually """
alternative_slov = {
'140202': (2.0, 28.1), \
'140201': (2.0, 34.0), \
'140200': (2.0, 44.0), \
'140199': (3.0, 25.5), \
'139203': (3.0, 18.9), \
'139202': (2.0, 57.5), \
'139201': (2.0, 59.7), \
'139200': (2.0, 87.2), \
'139199': (1.0, 100.0), \
'139198': (2.0, 72.8), \
'138198': (3.0, 7.7), \
'138199':(2.0, 10.0) }
alternative_czech = {
'141193': (2.0, 23.0), \
'141194': (2.0, 62.1), \
'141195': (3.0, 89.5), \
'141196': (2.0, 79.4), \
'141197': (2.0, 42.3), \
'141198': (2.0, 24.5), \
'141199': (2.0, 0.1), \
'140193': (2.0, 20.6), \
'140194': (2.0, 88.9), \
'140195': (1.0, 100.0), \
'140196': (1.0, 100.0), \
'140197': (1.0, 100.0), \
'140198': (1.0, 100.0), \
'140199': (3.0, 50.0), \
'139195': (2.0, 70.6), \
'139196': (2.0, 12.4), \
'139197': (2.0, 30.9), \
'139198': (2.0, 25.0) }
id = str(int(rec['GRID']))
for dict in [alternative_slov, alternative_czech]:
if id in dict:
rec['COVER_ID'] = dict[id][0]
rec['RATE_IN_GR'] = dict[id][1]
return rec
def get_countrydict():
""" Create a dictionary with grid-to-country information from a dbf file"""
countrydict = countryinfo('Test')
file = os.path.join(analysisdir,'country_dictionary.dat')
try:
countrydict = cPickle.load(open(file, 'rb'))
except:
db = dbf.Dbf(os.path.join(analysisdir,'GRIDCTRY.DBF'))
countrydict = {}
for n, rec in enumerate(db):
code = rec['COUNTRY']
gridid = str(int(rec['GRID']))
if code in ['Czech Republic', 'Slovakia']:
rec = fix_eu(rec)
rate_in_gr = rec['RATE_IN_GR'] * 1.e-2
i = int(gridid[-3::])
j = int(gridid[0:-3])
lat = -91 + j + 0.5
lon = -181 + i + 0.5
if code in countrydict:
a = countrydict[code]
else:
a = countryinfo(code)
shared_border = False
shared_water = False
if rec['COVER_ID'] == 0.0:
shared_border = False
shared_water = True
if rec['COVER_ID'] >= 2.0:
shared_border = True
if rec['COVER_ID'] >= 10.0:
shared_water = True
a.add_gridinfo(i - 1, j - 1, rate_in_gr, shared_border, shared_water)
countrydict[code] = a
db.close()
cPickle.dump(countrydict, open(file, 'wb'), -1)
return countrydict
if __name__ == "__main__":
countrydict = get_countrydict()
area = globarea()
areas = []
for k, v in countrydict.items():
ar = v.agg_1x1(area) / 1.e6
areas.append((ar, k))
areas.sort()
areas.reverse()
for a in areas: print a
v = countrydict['Ocean']
print v.agg_1x1(area)
v = countrydict['Netherlands']
print v.agg_1x1(area)
v = countrydict['Slovakia']
print v.agg_1x1(area)
v = countrydict['Czech Republic']
print v.agg_1x1(area)
v = countrydict['Czechoslovakia']
print v.agg_1x1(area)
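The docstring of this module describes how countryinfo.agg_1x1() aggregates a gridded 1x1 degree field to a country total, weighting each grid box by its land fraction. A short sketch follows, assuming the module is importable as da.analysis.tools_country (as elsewhere in this comparison), that GRIDCTRY.DBF or the cached country_dictionary.dat is present in da/analysis, and that the DBF spells country names the same way as the EU27 list defined above.

from da.analysis.tools_country import get_countrydict, EU27
from da.analysis.tools_regions import globarea

countrydict = get_countrydict()   # loads the cached pickle, or builds it from GRIDCTRY.DBF
area = globarea()                 # 180x360 field of grid-box areas in m2

# agg_1x1 multiplies the field by each box's land fraction before summing, so it is
# meant for per-gridbox fields rather than per-unit-area fields (see the CAUTION above).
eu27_land_area_km2 = sum(countrydict[name].agg_1x1(area) for name in EU27) / 1.e6
print('EU27 land area (km2):', eu27_land_area_km2)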
......@@ -13,7 +13,7 @@ program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
import numpy as np
import cPickle
import pickle
from da.analysis.tools_transcom import *
# Aggregated olson ecosystem regions for CT Europe
......@@ -27,8 +27,8 @@ aggregates = {
"IceWaterDeserts" : (12, 18, 19) \
}
ext_econams = [a for a, b in aggregates.iteritems()]
ext_ecocomps = [b for a, b in aggregates.iteritems()]
ext_econams = [a for a, b in aggregates.items()]
ext_ecocomps = [b for a, b in aggregates.items()]
eco19_to_ecosums = zeros((19, 6), float)
for i, k in enumerate(ext_ecocomps):
......@@ -48,7 +48,7 @@ def state_to_grid(values, regionmap, reverse=False, avg=False, mapname=None):
if not mapname:
raise Exception
regionselect = cPickle.load(open('%s_regiondict.pickle' % mapname, 'rb'))
regionselect = pickle.load(open('%s_regiondict.pickle' % mapname, 'rb'))
except:
# dictionary for region <-> map conversions
......@@ -60,14 +60,14 @@ def state_to_grid(values, regionmap, reverse=False, avg=False, mapname=None):
regionselect = regs
cPickle.dump(regionselect, open('%s_regiondict.pickle' % mapname, 'wb'), -1)
print 'Pickling region map'
pickle.dump(regionselect, open('%s_regiondict.pickle' % mapname, 'wb'), -1)
print('Pickling region map')
if reverse:
""" project 1x1 degree map onto ecoregions """
result = np.zeros(nregions, float)
for k, v in regionselect.iteritems():
for k, v in regionselect.items():
if avg:
result[k - 1] = values.ravel().take(v).mean()
else :
......@@ -78,7 +78,7 @@ def state_to_grid(values, regionmap, reverse=False, avg=False, mapname=None):
""" project ecoregion properties onto 1x1 degree map """
result = np.zeros((180, 360,), float)
for k, v in regionselect.iteritems():
for k, v in regionselect.items():
result.put(v, values[k - 1])
return result
......@@ -96,7 +96,7 @@ def globarea(im=360, jm=180, silent=True):
dxy = dxx * (np.sin(lat + dyy) - np.sin(lat)) * radius ** 2
area = np.resize(np.repeat(dxy, im, axis=0) , [jm, im])
if not silent:
print 'total area of field = ', np.sum(area.flat)
print 'total earth area = ', 4 * np.pi * radius ** 2
print('total area of field = ', np.sum(area.flat))
print('total earth area = ', 4 * np.pi * radius ** 2)
return area
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
import numpy as np
import cPickle
from da.analysis.tools_transcom import *
# Aggregated olson ecosystem regions for CT Europe
aggregates = {
"Forest" : (1, 2, 3, 5, 8, 10,) , \
"Grass" : (4, 6, 13) , \
"Crops": (14,) , \
"Tundra" : (7, 9, 16) , \
"Coastal" : (11, 15, 17) , \
"IceWaterDeserts" : (12, 18, 19) \
}
ext_econams = [a for a, b in aggregates.iteritems()]
ext_ecocomps = [b for a, b in aggregates.iteritems()]
eco19_to_ecosums = zeros((19, 6), float)
for i, k in enumerate(ext_ecocomps):
indices = [x - 1 for x in k]
eco19_to_ecosums[:, i].put(indices, 1.0)
##### END OF REGION DEFINITIONS
def state_to_grid(values, regionmap, reverse=False, avg=False, mapname=None):
"""
This method converts parameters from a CarbonTracker StateVector object to a gridded map of linear multiplication values. These
can subsequently be used in the transport model code to multiply/manipulate fluxes
"""
nregions = regionmap.max()
try:
if not mapname:
raise Exception
regionselect = cPickle.load(open('%s_regiondict.pickle' % mapname, 'rb'))
except:
# dictionary for region <-> map conversions
regs = {}
for r in np.arange(1, nregions + 1):
sel = (regionmap.flat == r).nonzero()
if len(sel[0]) > 0:
regs[r] = sel
regionselect = regs
cPickle.dump(regionselect, open('%s_regiondict.pickle' % mapname, 'wb'), -1)
print 'Pickling region map'
if reverse:
""" project 1x1 degree map onto ecoregions """
result = np.zeros(nregions, float)
for k, v in regionselect.iteritems():
if avg:
result[k - 1] = values.ravel().take(v).mean()
else :
result[k - 1] = values.ravel().take(v).sum()
return result
else:
""" project ecoregion properties onto 1x1 degree map """
result = np.zeros((180, 360,), float)
for k, v in regionselect.iteritems():
result.put(v, values[k - 1])
return result
def globarea(im=360, jm=180, silent=True):
""" Function calculates the surface area according to TM5 definitions"""
radius = 6.371e6 # the earth radius in meters
deg2rad = np.pi / 180.
g = 9.80665
dxx = 360.0 / im * deg2rad
dyy = 180.0 / jm * deg2rad
lat = np.arange(-90 * deg2rad, 90 * deg2rad, dyy)
dxy = dxx * (np.sin(lat + dyy) - np.sin(lat)) * radius ** 2
area = np.resize(np.repeat(dxy, im, axis=0) , [jm, im])
if not silent:
print 'total area of field = ', np.sum(area.flat)
print 'total earth area = ', 4 * np.pi * radius ** 2
return area
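state_to_grid() converts between a vector of per-region parameters and a 1x1 degree field, caching the region-to-gridcell lookup in a '<mapname>_regiondict.pickle' file. A minimal sketch with a synthetic two-region map; the map, the values, and the mapname are invented for illustration, and the module is assumed importable as da.analysis.tools_regions.

import numpy as np
from da.analysis.tools_regions import state_to_grid

# Synthetic region map: region 1 is the southern half, region 2 the northern half.
regionmap = np.ones((180, 360), int)
regionmap[90:, :] = 2
values = np.array([0.9, 1.1])    # one multiplier per region

# Forward: spread each region's value onto the 1x1 grid
# (the first call writes demo_regiondict.pickle in the working directory).
field = state_to_grid(values, regionmap, mapname='demo')

# Reverse: average the gridded field back onto the regions.
back = state_to_grid(field, regionmap, reverse=True, avg=True, mapname='demo')
print(back)    # approximately [0.9, 1.1]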
......@@ -104,7 +104,7 @@ def monthgen(sd, ed):
""" Generate sequence of datetime objects spaced by one month"""
from pylab import arange
if ed < sd:
raise ValueError, 'start date exceeds end date'
raise ValueError('start date exceeds end date')
sys.exit(2)
dates = []
for year in arange(sd.year, ed.year + 2):
......@@ -166,7 +166,7 @@ def yearly_avg(time, data, sdev=False):
weights = weights
if weights.shape[0] != data.shape[0]:
raise ValueError, 'yearly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0])
raise ValueError('yearly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0]))
sel = (weights != 0.0).nonzero()[0]
#print sel,array(time).take(sel),dd,ddnext
......@@ -180,7 +180,7 @@ def yearly_avg(time, data, sdev=False):
avg_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
std_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
else:
raise ValueError, 'yearly_avg takes 1, 2, or 3d arrays only'
raise ValueError('yearly_avg takes 1, 2, or 3d arrays only')
elif len(weights) == 1:
avg_data = data[0]
std_data = 0.0
......@@ -225,7 +225,7 @@ def monthly_avg(time, data, sdev=False):
weights = weights
if weights.shape[0] != data.shape[0]:
raise ValueError, 'yearly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0])
raise ValueError('yearly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0]))
sel = (weights != 0.0).nonzero()[0]
#print sel,array(time).take(sel),dd,nextmonth(dd)
......@@ -239,7 +239,7 @@ def monthly_avg(time, data, sdev=False):
avg_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
std_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
else:
raise ValueError, 'monthly_avg takes 1, 2, or 3d arrays only'
raise ValueError('monthly_avg takes 1, 2, or 3d arrays only')
elif len(weights) == 1:
avg_data = data[0]
std_data = 0.0
......@@ -287,7 +287,7 @@ def season_avg(time, data, sdev=False):
weights = weights
if weights.shape[0] != data.shape[0]:
raise ValueError, 'yearly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0])
raise ValueError('yearly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0]))
sel = (weights != 0.0).nonzero()[0]
#print sel,array(time).take(sel),dd,nextmonth(dd)
......@@ -301,7 +301,7 @@ def season_avg(time, data, sdev=False):
avg_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
std_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
else:
raise ValueError, 'season_avg takes 1, 2, or 3d arrays only'
raise ValueError('season_avg takes 1, 2, or 3d arrays only')
elif len(weights) == 1:
avg_data = data[0]
std_data = 0.0
......@@ -339,7 +339,7 @@ def longterm_avg(time, data):
if __name__ == '__main__':
#print monthgen(datetime(2000,1,1),datetime(2006,5,1))
dd = datetime(2002, 3, 1)
print nextmonth(dd), dd
print(nextmonth(dd), dd)
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#! /usr/bin/env python
import sys
import calendar
import copy
from datetime import datetime, timedelta
from da.tools.general import date2num, num2date
from numpy import array, zeros, newaxis, logical_and, arange
def Fromdatetime(date):
dt = date.timetuple()
return datetime(*dt[0:6])
def increase_time(dd, **kwargs):
""" Function increases the time by specified amount"""
return dd + timedelta(**kwargs)
def chardate(dd, cut=8):
return dd.strftime('%Y%m%d%H%M')[0:cut]
def timegen(sd, ed, dt):
dd = []
while sd <= ed:
dd.append(Fromdatetime(sd))
sd = sd + dt
return dd
def itau2datetime(itau, iyear0):
""" Function returns a datetime object from TM5s itau times"""
date0 = datetime(iyear0, 1, 1, 0, 0, 0)
if len(itau) == 1:
itau = [itau]
for time in itau:
sec = time % 60
min = (time / 60) % 60
hrs = (time / 3600) % 24
day = (time / 86400)
dt = timedelta(days=day, hours=hrs, minutes=min, seconds=sec)
yield date0 + dt
def date2dec(date):
""" Function converts datetime object to a Decimal number time (e.g., 1991.875 such as in IDL, CCG) """
if not isinstance(date, list): date = [date]
newdate = []
for dd in date:
Days0 = date2num(datetime(dd.year, 1, 1))
if calendar.isleap(dd.year):
DaysPerYear = 366.
else:
DaysPerYear = 365.
DayFrac = date2num(dd)
newdate.append(dd.year + (DayFrac - Days0) / DaysPerYear)
if len(newdate) == 1: return newdate[0]
return newdate
def dec2date(dectime):
""" Function converts decimal time from year fraction (e.g., 1991.875 such as in IDL, CCG) to a python datetime object """
dt = num2date(dec2num(dectime)).timetuple()
return datetime(*dt[0:7])
def dec2num(dectime):
""" Function converts decimal time from year fraction (e.g., 1991.875 such as in IDL, CCG) to a python decimal numtime """
from pylab import floor, drange, num2date, date2num
if not isinstance(dectime, list): dectime = [dectime]
newdectime = []
for dd in dectime:
yr = floor(dd)
Days0 = date2num(datetime(int(yr), 1, 1))
if calendar.isleap(yr):
DaysPerYear = 366.
else:
DaysPerYear = 365.
DayFrac = (dd - yr) * DaysPerYear
newdectime.append(Days0 + DayFrac)
if len(newdectime) == 1: return newdectime[0]
return newdectime
def num2dec(numtime):
""" Function converts python decimal numtime to an IDL decimal time """
from pylab import floor, drange, num2date, date2num
res = date2dec(num2mydate(numtime))
return res
def num2mydate(num):
""" Function converts decimal time from year fraction (e.g., 1991.875 such as in IDL, CCG) to a python datetime object """
dt = num2date(num).timetuple()
return datetime(*dt[0:7])
def monthgen(sd, ed):
""" Generate sequence of datetime objects spaced by one month"""
from pylab import arange
if ed < sd:
raise ValueError, 'start date exceeds end date'
sys.exit(2)
dates = []
for year in arange(sd.year, ed.year + 2):
for month in arange(1, 13):
date = datetime(year, month, 1)
if date > ed: return dates
else: dates.append(date)
def nextmonth(dd):
""" Find next 1st of the month following the date dd"""
if dd.month == 12:
cc = dd.replace(year=dd.year + 1)
ee = cc.replace(month=1)
else:
ee = dd.replace(month=dd.month + 1)
ff = ee.replace(day=1)
return ff
def in_interval(start, stop, times_in):
""" returns a list of fractions in time interval """
times = copy.copy(times_in)
interval = times[1] - times[0]
times.append(times[-1] + interval) # extend by one interval
times_filled = [times[0] + timedelta(days=d) for d in range((times[-1] - times[0]).days)]
b = []
in_int = 0.0
for t in times_filled: # loop over days
if t in times[1:]: # if new interval starts
b.append(in_int) # add previous aggregate to output
in_int = 0.0 # reset counter
in_int += int(logical_and(t >= start, t < stop)) # count if in interval [start,stop >
b.append(in_int)
if len(b) != len(times_in) : raise ValueError
return b
def yearly_avg(time, data, sdev=False):
""" make monthly average from array using rundat and data"""
years = array([d.year for d in time])
aa = []
ss = []
tt = []
dd = time[0]
ed = time[-1]
while dd <= ed:
ddnext = datetime(dd.year + 1, 1, 1)
weights = in_interval(dd, ddnext, time)
if len(weights) > 1:
weights = array(weights)
if weights.sum() > 0.0:
weights = weights / weights.sum()
else:
weights = weights
if weights.shape[0] != data.shape[0]:
raise ValueError, 'yearly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0])
sel = (weights != 0.0).nonzero()[0]
#print sel,array(time).take(sel),dd,ddnext
if data.ndim == 1:
avg_data = (weights.take(sel) * data.take(sel, axis=0)).sum(axis=0)
std_data = (weights.take(sel) * data.take(sel, axis=0)).std(axis=0)
elif data.ndim == 2:
avg_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
std_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
elif data.ndim == 3:
avg_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
std_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
else:
raise ValueError, 'yearly_avg takes 1, 2, or 3d arrays only'
elif len(weights) == 1:
avg_data = data[0]
std_data = 0.0
else:
continue # next year
aa.append(avg_data)
ss.append(std_data)
tt.append(datetime(dd.year, 6, 15))
dd = ddnext
aa = array(aa).squeeze()
ss = array(ss).squeeze()
time = tt
if len(tt) == 1:
aa = aa.reshape(1, *aa.shape)
ss = ss.reshape(1, *ss.shape)
if sdev: return time, aa, ss
else : return time, aa
def monthly_avg(time, data, sdev=False):
""" make monthly average from array using rundat and data"""
years = array([d.year for d in time])
months = array([d.month for d in time])
mm = []
ss = []
tt = []
dd = time[0]
ed = time[-1]
while dd <= ed:
ddnext = nextmonth(dd)
weights = in_interval(dd, ddnext, time)
if len(weights) > 1:
weights = array(weights)
if weights.sum() > 0.0:
weights = weights / weights.sum()
else:
weights = weights
if weights.shape[0] != data.shape[0]:
raise ValueError, 'yearly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0])
sel = (weights != 0.0).nonzero()[0]
#print sel,array(time).take(sel),dd,nextmonth(dd)
if data.ndim == 1:
avg_data = (weights.take(sel) * data.take(sel, axis=0)).sum(axis=0)
std_data = (weights.take(sel) * data.take(sel, axis=0)).std(axis=0)
elif data.ndim == 2:
avg_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
std_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
elif data.ndim == 3:
avg_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
std_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
else:
raise ValueError, 'monthly_avg takes 1, 2, or 3d arrays only'
elif len(weights) == 1:
avg_data = data[0]
std_data = 0.0
else:
continue # next month
mm.append(avg_data)
ss.append(std_data)
tt.append(datetime(dd.year, dd.month, 15))
dd = ddnext
mm = array(mm).squeeze()
ss = array(ss).squeeze()
time = tt
if len(tt) == 1:
mm = mm.reshape(-1, *mm.shape)
ss = ss.reshape(-1, *ss.shape)
if sdev: return time, mm, ss
else : return time, mm
def season_avg(time, data, sdev=False):
""" make season average from array using rundat and data"""
seas = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
mm = []
ss = []
tt = []
dd = time[0]
ed = time[-1]
while dd <= ed:
ddmid = nextmonth(dd)
ddnext = nextmonth(nextmonth(nextmonth(dd)))
weights = in_interval(dd, ddnext, time)
if len(weights) > 1:
weights = array(weights)
if weights.sum() > 0.0:
weights = weights / weights.sum()
else:
weights = weights
if weights.shape[0] != data.shape[0]:
raise ValueError, 'yearly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0])
sel = (weights != 0.0).nonzero()[0]
#print sel,array(time).take(sel),dd,nextmonth(dd)
if data.ndim == 1:
avg_data = (weights.take(sel) * data.take(sel, axis=0)).sum(axis=0)
std_data = (weights.take(sel) * data.take(sel, axis=0)).std(axis=0)
elif data.ndim == 2:
avg_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
std_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
elif data.ndim == 3:
avg_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
std_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
else:
raise ValueError, 'season_avg takes 1, 2, or 3d arrays only'
elif len(weights) == 1:
avg_data = data[0]
std_data = 0.0
else:
continue # next month
mm.append(avg_data)
ss.append(std_data)
tt.append(datetime(ddmid.year, ddmid.month, 15))
dd = ddnext
mm = array(mm).squeeze()
ss = array(ss).squeeze()
time = tt
if len(tt) == 1:
mm = mm.reshape(-1, *mm.shape)
ss = ss.reshape(-1, *ss.shape)
if sdev: return time, mm, ss
else : return time, mm
def longterm_avg(time, data):
""" Create long term mean """
time_avg = num2date(date2num(time).mean())
data_avg = data.mean(axis=0)
return time_avg, data_avg
if __name__ == '__main__':
#print monthgen(datetime(2000,1,1),datetime(2006,5,1))
dd = datetime(2002, 3, 1)
print nextmonth(dd), dd
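date2dec() and dec2date() convert between datetime objects and the IDL/CCG-style decimal-year convention mentioned in their docstrings. A short worked example of what 1991.875 means, mirroring the arithmetic in date2dec() without importing the module:

from datetime import datetime

# 1991.875 corresponds to 16 November 1991, 12:00 UTC:
#   days elapsed since 1 Jan 1991 00:00  = 319.5
#   days in 1991 (not a leap year)       = 365
#   decimal year = 1991 + 319.5 / 365    = 1991.8753...
d = datetime(1991, 11, 16, 12)
frac = (d - datetime(1991, 1, 1)).total_seconds() / 86400.0 / 365.0
print(1991 + frac)   # -> approximately 1991.875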
......@@ -98,7 +98,7 @@ for k in keys:
if 'shortname' in k:
ext_transshort.append(getattr(cdf_temp, k))
if 'component' in k:
ext_transcomps.append(map(int, getattr(cdf_temp, k).split(',')))
ext_transcomps.append(list(map(int, getattr(cdf_temp, k).split(','))))
cdf_temp.close()
......@@ -291,18 +291,18 @@ def cov2corr(A):
""" function projects 1x1 degree map onto TransCom regions by adding gridboxes over larger areas """
from hdf2field import Sds2field
import cPickle
import pickle
import os
from plottools import rebin
transcommapfile = 'tc_land11_oif30.hdf'
transcomconversionfile = 'map_to_tc.pickle'
try:
regionselect = cPickle.load(open(transcomconversionfile, 'rb'))
regionselect = pickle.load(open(transcomconversionfile, 'rb'))
except:
# read map from NetCDF
print '[in map_to_tc() in tctools.py:] ' + \
'Creating conversion map and pickle file for future quick use, patience please...'
print('[in map_to_tc() in tctools.py:] ' + \
'Creating conversion map and pickle file for future quick use, patience please...')
map = Sds2field(transcommapfile, 'tc_region')
# create dictionary for region <-> map conversions based on 1x1 map
@@ -314,10 +314,10 @@ def cov2corr(A):
if len(sel[0]) > 0:
regs[r] = sel
regionselect = regs
dummy = cPickle.dump(regionselect, open(transcomconversionfile, 'wb'), -1)
dummy = pickle.dump(regionselect, open(transcomconversionfile, 'wb'), -1)
result = zeros(len(regionselect.keys()), float)
for k, v in regionselect.iteritems():
result = zeros(len(list(regionselect.keys())), float)
for k, v in regionselect.items():
result[k - 1] = data.ravel().take(v).sum()
return result
@@ -332,7 +332,7 @@ def cov2corr(A):
return (transnams[reg - 1],)
elif eco:
if reg > rundat.npparameters:
raise IOError, 'Region number exceeds definitions'
raise IOError('Region number exceeds definitions')
elif reg > rundat.n_land and reg != rundat.nparameters:
ret = ('Ocean', oifnams[reg - rundat.n_land - 1])
elif reg > 209 and reg <= rundat.n_land:
@@ -346,12 +346,12 @@ def cov2corr(A):
return (econames[(reg - 1) % 19],)
if __name__ == '__main__':
print transnams
print transshort
print ext_transnams
print ext_transshort
print olsonnams
print olsonshort
print ext_transcomps
print olsonextnams
print(transnams)
print(transshort)
print(ext_transnams)
print(ext_transshort)
print(olsonnams)
print(olsonshort)
print(ext_transcomps)
print(olsonextnams)
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
import os
import sys
sys.path.append('../../')
rootdir = os.getcwd().split('da/')[0]
analysisdir = os.path.join(rootdir, 'da/analysis')
from numpy import array, identity, zeros, arange, dot, sqrt, newaxis
import da.tools.io4 as io
# Get masks of different region definitions
matrix_file = os.path.join(analysisdir, 'copied_regions.nc')
cdf_temp = io.CT_CDF(matrix_file, 'read')
transcommask = cdf_temp.get_variable('transcom_regions')
if transcommask.max() < 23:
if 'transcom_regions_original' in cdf_temp.variables:
transcommask = cdf_temp.get_variable('transcom_regions_original')
olson240mask = cdf_temp.get_variable('regions')
olsonmask = cdf_temp.get_variable('land_ecosystems')
oifmask = cdf_temp.get_variable('ocean_regions')
dummy = cdf_temp.close()
matrix_file = os.path.join(analysisdir, 'copied_regions_extended.nc')
cdf_temp = io.CT_CDF(matrix_file, 'read')
olson_ext_mask = cdf_temp.get_variable('regions')
dummy = cdf_temp.close()
# Names and short names of TransCom regions
transshort = []
transnams = []
transland = []
temp = open(os.path.join(analysisdir, 't3_region_names'), 'r').readlines()
for line in temp:
items = line.split()
if items:
num, abbr, name = (items[0], items[1], ' '.join(items[2:]),)
transnams.append(name.strip('"'))
transshort.append(abbr)
if abbr.startswith('T'):
transland.append(name.strip('"'))
transnams.append("Non-optimized")
transshort.append("I-NNOP")
# Names and short names of Olson regions
olsonnams = []
olsonshort = []
temp = open(os.path.join(analysisdir, 'olson19_region_names'), 'r').readlines()
for line in temp:
items = line.split()
if items:
num, abbr, name = (items[0], items[1], ' '.join(items[2:]),)
olsonnams.append(name.strip('"'))
olsonshort.append(abbr)
olsonextnams = []
matrix_file = os.path.join(analysisdir, 'copied_regions_extended.nc')
cdf_temp = io.CT_CDF(matrix_file, 'read')
keys = cdf_temp.ncattrs()
keys.sort()
for k in keys:
if 'Region' in k:
olsonextnams.append(getattr(cdf_temp, k))
cdf_temp.close()
ext_transnams = []
ext_transshort = []
ext_transcomps = []
# Get names of aggregated regions for post aggregation
matrix_file = os.path.join(analysisdir, 'postagg_definitions.nc')
cdf_temp = io.CT_CDF(matrix_file, 'read')
xform = cdf_temp.get_variable('xform')
keys = cdf_temp.ncattrs()
keys.sort()
for k in keys:
if 'longname' in k:
ext_transnams.append(getattr(cdf_temp, k))
if 'shortname' in k:
ext_transshort.append(getattr(cdf_temp, k))
if 'component' in k:
ext_transcomps.append(list(map(int, getattr(cdf_temp, k).split(','))))
cdf_temp.close()
# Names of the ocean inversion flux regions, to go along with oifmask
oifnams = ['(1) NOCN Arctic Ocean', \
'(2) NAH North Atlantic (49 - 76N)', \
'(3) NAM North Atlantic (36 - 49N)', \
'(4) NAL North Atlantic (18 - 36N)', \
'(5) NAT North Atlantic ( 0 - 18N)', \
'(6) SAT South Atlantic ( 0 - 18S)', \
'(7) SAL South Atlantic (18 - 31S)', \
'(8) SAM South Atlantic (31 - 44S)', \
'(9) SAH South Atlantic (44 - 58S)', \
'(10) SOCN Southern Ocean (S of 58S)', \
'(11) NPHW North Pacific (N of 49N, W of 195E)', \
'(12) NPHE North Pacific (N of 36N, E of 195E)', \
'(13) NPK North Pacific (Kuroshio Extension)', \
'(14) NPLW North Pacific (18N - K.Ext, W of 195E)', \
'(15) NPLE North Pacific (18 - 36N, E of 195E)', \
'(16) NPTW North Pacific ( 0 - 18N, W of 199E)', \
'(17) NPTE North Pacific ( 0 - 18N, E of 199E)', \
'(18) SPTW South Pacific ( 0 - 18S, W of 199E)', \
'(19) SPTE South Pacific ( 0 - 18S, E of 199E)', \
'(20) SPLW South Pacific (18 - 31S, W of 233E)', \
'(21) SPLE South Pacific (18 - 31S, E of 233E)', \
'(22) SPMW South Pacific (31 - 44S, W of 248E)', \
'(23) SPME South Pacific (31 - 44S, E of 248E, W of 278E)', \
'(24) SPMC South Pacific (31 - 44S, coastal E of 278E)', \
'(25) SPH South Pacific (44 - 58S) ', \
'(26) NI North Indian', \
'(27) SIT South Indian (0 - 18S)', \
'(28) SIL South Indian (18 - 31S)', \
'(29) SIM South Indian (31 - 44S)', \
'(30) SIH South Indian (44 - 58S)']
oiflocs = [ (200, 80,), \
(330, 55,), \
(330, 40,), \
(330, 22,), \
(330, 8,), \
(350, -12,), \
(350, -27,), \
(350, -40,), \
(350, -53,), \
(200, -70,), \
(178, 54,), \
(210, 40,), \
(165, 38,), \
(178, 25,), \
(215, 25,), \
(170, 8,), \
(230, 8,), \
(175, -10,), \
(240, -10,), \
(195, -27,), \
(265, -27,), \
(195, -40,), \
(262, -40,), \
(283, -40,), \
(220, -53,), \
(68, 8,), \
(75, -10,), \
(75, -27,), \
(75, -40,), \
(75, -53,)]
translocs = [ (-177, 0), \
(-92, 53,), \
(-108, 34,), \
(-66, 4,), \
(-50, -17,), \
(15, 17,), \
(26, -12,), \
(84, 63,), \
(103, 30,), \
(115, 0,), \
(132, -25,), \
(9, 50,), \
(-174, 46,), \
(136, 6,), \
(-108, 6,), \
(-123, -15,), \
(-32, 58,), \
(-32, 38,), \
(-32, 0,), \
(-32, -38,), \
(-14, -65,), \
(68, 2,)]
#olsonshort=[str(name.split()[1:2]).join(' ') for name in olsonnams]
old_olsonshort = [' '.join(name.split(' ')[1:2]) for name in olsonnams]
olsonlabs = ['Conifer Forest', 'Broadleaf Forest', 'Mixed Forest', 'Grass/Shrub', 'Tropical Forest', 'Scrub/Woods', 'Semitundra', 'Fields/Woods/\nSavanna', \
'Northern Taiga', 'Forest/Field', 'Wetland', 'Deserts', 'Shrub/Tree/\nSuc ', 'Crops', 'Conifer\n Snowy/Coastal', \
'Wooded tundra', 'Mangrove', 'Ice and \nPolar desert', 'Water']
ecmwfnams = [ ' 1 CRPSMF Crops, mixed farming', \
' 2 SHGRSS Short Grass', \
' 3 EVNDLF Evergreen Needleleaf', \
' 4 DECNDLF Deciduous Needleleaf', \
' 5 EVBRDLF Evergreen Broadleaf', \
' 6 DECBRLF Deciduous Broadleaf', \
' 7 TLGRSS Tall Grass', \
' 8 DES Desert', \
' 9 TDR Tundra', \
'10 IRRCR Irrigated Crops', \
'11 SMDES Semidesert', \
'12 ICE Ice Caps', \
'13 BGM Bogs and Marches', \
'14 INW Inland Water', \
'15 OCE Ocean', \
'16 EVSHRB Evergreen Shrubs', \
'17 DECSHR Deciduous shrubs', \
'18 MXFRST Mixed Forest', \
'19 INTFRST Interrupted Forest']
ecmwfshort = [name.split()[1] for name in ecmwfnams]
ecmwflabs = ['Crops, mixed farming', 'Short Grass', 'Evergreen Needleleaf', 'Deciduous Needleleaf', 'Evergreen Broadleaf', \
'Deciduous Broadleaf', 'Tall Grass', 'Desert', \
'Tundra', 'Irrigated Crops', 'Semidesert', 'Ice Caps', 'Bogs and Marches', 'Inland Water', 'Ocean', \
'Evergreen Shrubs', 'Deciduous shrubs', 'Mixed Forest', 'Interrupted Forest']
a = array([\
0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , \
0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 0 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , \
0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 0 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , \
0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , \
1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , \
0 , 0 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , \
0 , 0 , 0 , 0 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , \
0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , \
0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 , 0 , 1 , \
0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 0 , 0 , 0 , \
0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 0])
O30to11 = a.reshape(11, 30).transpose()
O11to11 = identity(11)
ntcland = 11 # TC standard
ntcocean = 11 # TC standard
Ols259_to_TC23 = zeros((259, 23), float)
Ols240_to_TC23 = zeros((240, 23), float)
Ols221_to_TC23 = zeros((221, 23), float)
for i in arange(ntcland):
Ols259_to_TC23[i * 19:(i + 1) * 19, i] = 1.0
Ols259_to_TC23[190:228, 10] = 1.0 # Europe
Ols259_to_TC23[228:258, 11:22] = O30to11
for i in arange(ntcland):
Ols240_to_TC23[i * 19:(i + 1) * 19, i] = 1.0
Ols240_to_TC23[209:239, 11:22] = O30to11
for i in arange(ntcland):
Ols221_to_TC23[i * 19:(i + 1) * 19, i] = 1.0
Ols221_to_TC23[209:220, 11:22] = O11to11
Ols221_to_TC23[220, 22] = 1.0
Ols240_to_TC23[239, 22] = 1.0
Ols259_to_TC23[258, 22] = 1.0
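if __name__ == '__main__':
    # Minimal sketch of how these aggregation matrices are used: a vector with
    # one value per 240 Olson ecoregions collapses onto the 23 TransCom regions
    # through a matrix product. The uniform test vector is an assumption for
    # illustration only; its aggregate simply counts ecoregions per region.
    from numpy import ones
    print(dot(ones(240), Ols240_to_TC23))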
ntcland = 11 # TC standard
ntcocean = 11 # TC standard
ExtendedTCRegionsFile = 'postagg_definitions.nc'
def ExtendedTCRegions(data, cov=False):
""" convert to extended transcom shaped regions"""
nparams = data.shape[-1]
if nparams != 23:
raise ValueError('Do not know how to convert %s regions to 37 extended transcom regions' % (nparams,))
M = xform
if not cov:
return dot(array(data).squeeze(), M)
else:
try:
return M.transpose().dot(data).dot(M)
except:
return dot(dot(M.transpose(), data), M) #Huygens fix
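if __name__ == '__main__':
    # Minimal sketch of ExtendedTCRegions: a uniform 23-region vector (an
    # assumption for illustration only) is projected onto the extended TransCom
    # regions through the xform matrix read from postagg_definitions.nc above.
    from numpy import ones
    print(ExtendedTCRegions(ones(23)))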
def cov2corr(A):
b = 1. / sqrt(A.diagonal())
return A * dot(b[:, newaxis], b[newaxis, :])
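if __name__ == '__main__':
    # Minimal sketch of cov2corr on a hand-made 2x2 covariance matrix (the
    # numbers are illustrative assumptions, not CTDAS output); the result has
    # ones on the diagonal and correlations off the diagonal.
    demo_cov = array([[4.0, 1.0], [1.0, 9.0]])
    print(cov2corr(demo_cov))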
""" function projects 1x1 degree map onto TransCom regions by adding gridboxes over larger areas """
from hdf2field import Sds2field
import pickle
import os
from plottools import rebin
transcommapfile = 'tc_land11_oif30.hdf'
transcomconversionfile = 'map_to_tc.pickle'
try:
regionselect = pickle.load(open(transcomconversionfile, 'rb'))
except:
# read map from NetCDF
print('[in map_to_tc() in tctools.py:] ' + \
'Creating conversion map and pickle file for future quick use, patience please...')
map = Sds2field(transcommapfile, 'tc_region')
# create dictionary for region <-> map conversions based on 1x1 map
regs = {}
nregions = map.max()
for r in arange(1, nregions + 1):
sel = (map.flat == r).nonzero()
if len(sel[0]) > 0:
regs[r] = sel
regionselect = regs
dummy = pickle.dump(regionselect, open(transcomconversionfile, 'wb'), -1)
result = zeros(len(regionselect.keys()), float)
for k, v in regionselect.items():
result[k - 1] = data.ravel().take(v).sum()
return result
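# Minimal sketch of using the projection above, assuming it forms the body of
# a map_to_tc(data) function as its own log message suggests; the uniform
# 1x1-degree field below is an illustrative assumption, not CTDAS output.
#
#     from numpy import ones
#     flux_1x1 = ones((180, 360))
#     per_region = map_to_tc(flux_1x1)   # one summed value per TransCom region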
""" return name of region number reg """
if longnames:
econames = olsonnams
else :
econames = olsonshort
if tc:
return (transnams[reg - 1],)
elif eco:
if reg > rundat.npparameters:
raise IOError('Region number exceeds definitions')
elif reg > rundat.n_land and reg != rundat.nparameters:
ret = ('Ocean', oifnams[reg - rundat.n_land - 1])
elif reg > 209 and reg <= rundat.n_land:
ret = ('Europe', econames[(reg - 1) % 19] + "_East")
elif reg == rundat.nparameters:
ret = (transnams[-1])
else:
ret = (transnams[(reg - 1) // 19], econames[(reg - 1) % 19])
return ret
elif olson:
return (econames[(reg - 1) % 19],)
if __name__ == '__main__':
print(transnams)
print(transshort)
print(ext_transnams)
print(ext_transshort)
print(olsonnams)
print(olsonshort)
print(ext_transcomps)
print(olsonextnams)
File added