Commit 35f5edb9 authored by karolina

No commit message

parent c0c2e48a
Showing with 3525 additions and 0 deletions
This diff is collapsed.
File added
1 CONIFOR "Conifer Forest"
2 BRODLFOR "Broadleaf Forest: temperate, subtropical drought"
3 MIXEDFOR "Mixed Forest: conifer/broadleaf; so. Boreal"
4 GRASSHRB "Grassland +/- Shrub or Tree"
5 TROPICFR "Tropical/subtr. Forest: montane, seasonal, rainforest "
6 SCRUBWDS "Scrub +/- Woodland &/or fields (evergreen/decid.) "
7 SEMIDTUN "Semidesert shrub/steppe; Tundra (polar, alpine) "
8 FLDWDSAV "Field/Woods complex &/or Savanna, tallgrass "
9 NORTAIGA "Northern Boreal Taiga woodland/tundra"
10 FORFDREV "Forest/Field; Dry Evergreen broadleaf woods "
11 WETLAND "Wetlands: mires (bog/fen); marsh/swamp +/- mangrove "
12 DESERTS "Desert: bare/alpine; sandy; salt/soda "
13 SHRBTRST "Shrub/Tree: succulent/thorn "
14 CROPSETL "Crop/Settlement (irrigated or not) "
15 CONIFRFC "Conifer snowy Rainforest, coastal "
27 WTNDMHTH "Wooded Tundra Margin; Heath/moorland "
19 MANGROVE "Mangrove/wet forest/thicket + tidal flat "
25 ICEPLDE "Ice and Polar Desert"
19 WATER "Water bodies"
File added
File added
#!/usr/bin/env python
# runinfo.py
"""
Author : peters
Revision History:
File created on 01 Mar 2011.
"""
import mysettings
import commands
import os
import sys
from datetime import datetime, timedelta
class RunInfo(object):
    """
    This is a CarbonTracker run info object.
    Initialize it with the following keywords:

    MANDATORY:
    project   = Name of project, should correspond to a directory [string]
    sd        = start date of project output [datetime object or integer yyyymmdd]
    ed        = end date of project output [datetime object or integer yyyymmdd]
    OPTIONAL:
    basedir   = base directory of model output on your machine [string, default='~/Modeling/download']
    outputdir = output directory for figures and summary files [string, default='~/Modeling/SEATA']

    Tip: change the std_rundat dictionary in module filtertools to set new defaults for all values!
    """
    def __init__(self, infodict={}, rcfile=None):
        """ Initialize from the name of an rcfile, or by passing the dictionary directly """
        import da.tools.rc as rc
        import da.tools.io4 as io
        import copy

        if rcfile:
            infodict = rc.read(rcfile)

        self.basedir = infodict['dir.da_run']
        self.proj = os.path.split(self.basedir)[-1]
        self.projdir = self.basedir
        self.dir_scheme = mysettings.ct_dir_scheme

        self.inputdir = os.path.join(self.basedir, 'output')
        self.outputdir = os.path.join(self.basedir, 'analysis')
        if not os.path.exists(self.outputdir):
            os.makedirs(self.outputdir)
            print "Creating new output directory " + self.outputdir

        sd = infodict['time.start']
        ed = infodict['time.finish']
        dt = infodict['time.cycle']

        self.dt = timedelta(days=int(dt))
        if not isinstance(sd, datetime):
            self.sd = datetime.strptime(sd, '%Y-%m-%d %H:%M:%S')
        if not isinstance(ed, datetime):
            self.ed = datetime.strptime(ed, '%Y-%m-%d %H:%M:%S')

        dd = copy.deepcopy(self.sd)

        self.inputfiles = []
        self.weeks = []
        self.inputdict = {}
        while dd < self.ed:
            filename = os.path.join(self.inputdir, '%s' % dd.strftime('%Y%m%d'), 'savestate.nc')
            if os.path.exists(filename):
                self.inputfiles.append(filename)
                self.weeks.append(dd)
                self.inputdict[dd] = filename
            else:
                break
            dd = dd + self.dt

        self.ed = dd
        self.nweeks = len(self.weeks)

        # run parameters from the first savestate file
        ncf = io.CT_Read(self.inputfiles[0], 'read')
        self.nlag = len(ncf.dimensions['nlag'])
        self.nmembers = len(ncf.dimensions['nmembers'])
        self.nparameters = len(ncf.dimensions['nparameters'])
        ncf.close()

        self.namingscheme = 'wp_Mar2011'
        filename = os.path.join('NamingScheme.' + self.namingscheme + '.rc')
        try:
            rcinfo = rc.read(filename)
        except IOError:
            # was 'NamingScheme', an undefined name
            print '%s was specified as naming scheme, but no matching %s rc-file was found, exiting...' % (self.namingscheme, filename,)
            sys.exit(1)
        except:
            print 'Unknown error reading rc-file: %s, exiting...' % (filename,)
            sys.exit(1)
        self.namedict = rcinfo
    def __str__(self):
        return 'project              : ' + self.projdir + \
               '\nstart date           : ' + str(self.sd) + \
               '\nend date             : ' + str(self.ed) + \
               '\ndelta date           : ' + str(self.dt) + \
               '\nnweeks               : ' + str(self.nweeks) + \
               '\nnparameters          : ' + str(self.nparameters) + \
               '\nnmembers             : ' + str(self.nmembers) + \
               '\nnlag                 : ' + str(self.nlag) + \
               '\nDA output dir        : ' + self.inputdir + \
               '\nanalysis output dir  : ' + self.outputdir + \
               '\nnaming scheme        : ' + self.namingscheme
def get_rundat_settings(args):
    """ create settings dict for rundat from script arguments """
    # std_rundat and std_constructors are expected to come from module
    # filtertools (see the RunInfo class docstring)
    settings = std_rundat.copy()  # start with defaults for rundat
    for item in args:
        item = item.lower()
        k, v = item.split('=')
        if settings.has_key(k):
            if k in std_constructors:
                settings[k] = std_constructors[k](v)
            else:
                settings[k] = v
        else:
            raise IOError, 'Parameter unknown: %s' % (k,)  # report the key, not the value
    return settings
if __name__ == "__main__":
    import getopt
    import sys
    from string import *

    sys.path.append('../../')

    # Parse keywords
    rundat = RunInfo(rcfile='../../da.rc')
    print rundat
    sys.exit(0)
1 T-NABR "North American Boreal"
2 T-NATM "North American Temperate"
3 T-SATR "South American Tropical"
4 T-SATM "South American Temperate"
5 T-NAFR "Northern Africa"
6 T-SAFR "Southern Africa"
7 T-EUBR "Eurasia Boreal"
8 T-EUTM "Eurasia Temperate"
9 T-ASTR "Tropical Asia"
10 T-AUST "Australia"
11 T-EURO "Europe"
12 O-NPTM "North Pacific Temperate"
13 O-WPTR "West Pacific Tropical"
14 O-EPTR "East Pacific Tropical"
15 O-SPTM "South Pacific Temperate"
16 O-NOCN "Northern Ocean"
17 O-NATM "North Atlantic Temperate"
18 O-SATR "Atlantic Tropical"
19 O-SATM "South Atlantic Temperate"
20 O-SOCN "Southern Ocean"
21 O-INTR "Indian Tropical"
22 O-INTM "Indian Temperate"
File added
#!/usr/bin/env python
import commands
import os
import sys
from da.analysis.tools_transcom import *
# Aggregated olson ecosystem regions for CT Europe
aggregates = [
    ("Forests/Wooded",     (1, 2, 3, 5, 8, 10)),
    ("Grass/Shrubs",       (4, 6, 13)),
    ("Crops/Agriculture",  (14,)),
    ("Tundra/Taiga",       (7, 9, 16)),
    ("Coastal/Wet",        (11, 15, 17)),
    ("Ice/Water/Deserts",  (12, 18, 19)),
]

agg_dict = {}
for k, v in aggregates:
    agg_dict[k] = v

ext_econams = [a for a, b in aggregates]
ext_ecocomps = [b for a, b in aggregates]

# 19x6 matrix that maps the 19 Olson ecosystem classes onto the 6 aggregates
eco19_to_ecosums = zeros((19, 6), float)
for i, k in enumerate(ext_ecocomps):
    indices = [x - 1 for x in k]
    eco19_to_ecosums[:, i].put(indices, 1.0)
##### END OF REGION DEFINITIONS
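# Example (illustrative): aggregating a 19-component ecosystem vector into the
# six aggregate classes above is a single matrix product with eco19_to_ecosums.
if __name__ == "__main__":
    from numpy import dot, ones
    eco19 = ones(19)                      # one value per Olson ecosystem class
    eco6 = dot(eco19, eco19_to_ecosums)   # six aggregate sums
    print eco6                            # [ 6.  3.  1.  3.  3.  3.]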
def StateToGrid(values, regionmap, reverse=False, avg=False):
    """
    This method converts parameters from a CarbonTracker StateVector object to a gridded map of
    linear multiplication values. These can subsequently be used in the transport model code to
    multiply/manipulate fluxes.
    """
    import numpy as np

    nregions = regionmap.max()

    # dictionary for region <-> map conversions
    regs = {}
    for r in np.arange(1, nregions + 1):
        sel = (regionmap.flat == r).nonzero()
        if len(sel[0]) > 0:
            regs[r] = sel
    regionselect = regs

    if reverse:
        # project 1x1 degree map onto ecoregions
        result = np.zeros(nregions, float)
        for k, v in regionselect.iteritems():
            if avg:
                result[k - 1] = values.ravel().take(v).mean()
            else:
                result[k - 1] = values.ravel().take(v).sum()
        return result
    else:
        # project ecoregion properties onto 1x1 degree map
        result = np.zeros((180, 360,), float)
        for k, v in regionselect.iteritems():
            result.put(v, values[k - 1])
        return result
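# Example (illustrative): with a hypothetical one-region 1x1 degree map, a state
# vector is projected onto the grid and then aggregated back with reverse=True.
if __name__ == "__main__":
    import numpy as np
    regionmap = np.ones((180, 360), int)   # hypothetical map: one single region
    statevec = np.array([1.5])             # one multiplier per region
    gridded = StateToGrid(statevec, regionmap)
    print StateToGrid(gridded, regionmap, reverse=True, avg=True)   # [ 1.5]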
def globarea(im=360, jm=180, silent=True):
    """ Function calculates the surface area according to TM5 definitions"""
    import numpy as np

    radius = 6.371e6       # the earth radius in meters
    deg2rad = np.pi / 180.
    g = 9.80665

    dxx = 360.0 / im * deg2rad
    dyy = 180.0 / jm * deg2rad
    lat = np.arange(-90 * deg2rad, 90 * deg2rad, dyy)
    dxy = dxx * (np.sin(lat + dyy) - np.sin(lat)) * radius ** 2
    area = np.resize(np.repeat(dxy, im, axis=0), [jm, im])
    if not silent:
        print 'total area of field = ', np.sum(area.flat)
        print 'total earth area    = ', 4 * np.pi * radius ** 2
    return area
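# Sanity check (illustrative): the grid cell areas should sum to the surface
# area of the sphere, 4*pi*r^2, up to the discretization of the latitude bands.
if __name__ == "__main__":
    area = globarea(silent=False)   # prints total field area and total earth area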
#! /usr/bin/env python
from datetime import datetime, timedelta
import calendar
from pylab import floor
from matplotlib.dates import date2num, num2date
from numpy import array, zeros, newaxis
def Fromdatetime(date):
    dt = date.timetuple()
    return datetime(*dt[0:6])


def increase_time(dd, **kwargs):
    """ Function increases the time by specified amount"""
    return dd + timedelta(**kwargs)


def chardate(dd, cut=8):
    return dd.strftime('%Y%m%d%H%M')[0:cut]


def timegen(sd, ed, dt):
    dd = []
    while sd <= ed:
        dd.append(Fromdatetime(sd))
        sd = sd + dt
    return dd
def itau2datetime(itau, iyear0):
    """ Function returns a datetime object from TM5s itau times"""
    date0 = datetime(iyear0, 1, 1, 0, 0, 0)
    if not isinstance(itau, (list, tuple)):  # len() fails on a scalar itau
        itau = [itau]
    for time in itau:
        sec = time % 60
        min = (time / 60) % 60
        hrs = (time / 3600) % 24
        day = (time / 86400)
        dt = timedelta(days=day, hours=hrs, minutes=min, seconds=sec)
        yield date0 + dt
def date2dec(date):
    """ Function converts datetime object to a decimal number time (e.g., 1991.875 such as in IDL, CCG) """
    if not isinstance(date, list):
        date = [date]
    newdate = []
    for dd in date:
        Days0 = date2num(datetime(dd.year, 1, 1))
        if calendar.isleap(dd.year):
            DaysPerYear = 366.
        else:
            DaysPerYear = 365.
        DayFrac = date2num(dd)
        newdate.append(dd.year + (DayFrac - Days0) / DaysPerYear)
    if len(newdate) == 1:
        return newdate[0]
    return newdate
def dec2date(dectime):
    """ Function converts decimal time from year fraction (e.g., 1991.875 such as in IDL, CCG) to a python datetime object """
    dt = num2date(dec2num(dectime)).timetuple()
    return datetime(*dt[0:6])  # dt[6] is the weekday, not a valid datetime argument
def dec2num(dectime):
    """ Function converts decimal time from year fraction (e.g., 1991.875 such as in IDL, CCG) to a python decimal numtime """
    if not isinstance(dectime, list):
        dectime = [dectime]
    newdectime = []
    for dd in dectime:
        yr = floor(dd)
        Days0 = date2num(datetime(int(yr), 1, 1))
        if calendar.isleap(yr):
            DaysPerYear = 366.
        else:
            DaysPerYear = 365.
        DayFrac = (dd - yr) * DaysPerYear
        newdectime.append(Days0 + DayFrac)
    if len(newdectime) == 1:
        return newdectime[0]
    return newdectime
def num2dec(numtime):
    """ Function converts python decimal numtime to an IDL decimal time """
    res = date2dec(num2mydate(numtime))
    return res


def num2mydate(num):
    """ Function converts a python decimal numtime to a python datetime object """
    dt = num2date(num).timetuple()
    return datetime(*dt[0:6])  # dt[6] is the weekday, not a valid datetime argument
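# Round-trip example (illustrative): a calendar date converts to an IDL/CCG
# style decimal year (about 1991.874 here) and back to a datetime object.
if __name__ == "__main__":
    d = datetime(1991, 11, 16)
    print date2dec(d)
    print dec2date(date2dec(d))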
def monthgen(sd, ed):
    """ Generate sequence of datetime objects spaced by one month"""
    from pylab import arange
    if ed < sd:
        raise ValueError, 'start date exceeds end date'
    dates = []
    for year in arange(sd.year, ed.year + 2):
        for month in arange(1, 13):
            date = datetime(year, month, 1)
            if date > ed:
                return dates
            else:
                dates.append(date)
def nextmonth(dd):
    """ Find the 1st of the month following the date dd"""
    if dd.month == 12:
        cc = dd.replace(year=dd.year + 1)
        ee = cc.replace(month=1)
    else:
        ee = dd.replace(month=dd.month + 1)
    ff = ee.replace(day=1)
    return ff
def in_interval(start, stop, times_in):
    """ returns a list of fractions in time interval """
    from numpy import logical_and
    import copy

    times = copy.copy(times_in)

    interval = times[1] - times[0]
    times.append(times[-1] + interval)  # extend by one interval
    times_filled = [times[0] + timedelta(days=d) for d in range((times[-1] - times[0]).days)]

    b = []
    in_int = 0.0
    for t in times_filled:                # loop over days
        if t in times[1:]:                # if new interval starts
            b.append(in_int)              # add previous aggregate to output
            in_int = 0.0                  # reset counter
        in_int += int(logical_and(t >= start, t < stop))  # count if in interval [start, stop>
    b.append(in_int)

    if len(b) != len(times_in):
        raise ValueError
    return b
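# Example (illustrative): for weekly timestamps, count how many days of each
# week fall inside a 10-day window that starts at the first timestamp.
if __name__ == "__main__":
    t0 = datetime(2005, 1, 1)
    weeks = [t0 + timedelta(days=7 * i) for i in range(3)]
    print in_interval(t0, t0 + timedelta(days=10), weeks)   # [7.0, 3.0, 0.0]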
def yearly_avg(time, data, sdev=False):
    """ make yearly average from arrays of time and data"""
    years = array([d.year for d in time])

    aa = []
    ss = []
    tt = []
    dd = time[0]
    ed = time[-1]
    while dd <= ed:
        ddnext = datetime(dd.year + 1, 1, 1)
        weights = in_interval(dd, ddnext, time)
        if len(weights) > 1:
            weights = array(weights)
            if weights.sum() > 0.0:
                weights = weights / weights.sum()
            if weights.shape[0] != data.shape[0]:
                raise ValueError, 'yearly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0])
            sel = (weights != 0.0).nonzero()[0]
            #print sel, array(time).take(sel), dd, ddnext
            if data.ndim == 1:
                avg_data = (weights.take(sel) * data.take(sel, axis=0)).sum(axis=0)
                std_data = (weights.take(sel) * data.take(sel, axis=0)).std(axis=0)
            elif data.ndim == 2:
                avg_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
                std_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
            elif data.ndim == 3:
                avg_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
                std_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
            else:
                raise ValueError, 'yearly_avg takes 1, 2, or 3d arrays only'
        elif len(weights) == 1:
            avg_data = data[0]
            std_data = 0.0
        else:
            dd = ddnext
            continue  # next year; without advancing dd the loop would never terminate
        aa.append(avg_data)
        ss.append(std_data)
        tt.append(datetime(dd.year, 6, 15))
        dd = ddnext
    aa = array(aa).squeeze()
    ss = array(ss).squeeze()
    time = tt
    if len(tt) == 1:
        aa = aa.reshape(1, *aa.shape)
        ss = ss.reshape(1, *ss.shape)
    if sdev:
        return time, aa, ss
    else:
        return time, aa
def monthly_avg(time, data, sdev=False):
    """ make monthly average from arrays of time and data"""
    years = array([d.year for d in time])
    months = array([d.month for d in time])

    mm = []
    ss = []
    tt = []
    dd = time[0]
    ed = time[-1]
    while dd <= ed:
        ddnext = nextmonth(dd)
        weights = in_interval(dd, ddnext, time)
        if len(weights) > 1:
            weights = array(weights)
            if weights.sum() > 0.0:
                weights = weights / weights.sum()
            if weights.shape[0] != data.shape[0]:
                raise ValueError, 'monthly_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0])
            sel = (weights != 0.0).nonzero()[0]
            #print sel, array(time).take(sel), dd, nextmonth(dd)
            if data.ndim == 1:
                avg_data = (weights.take(sel) * data.take(sel, axis=0)).sum(axis=0)
                std_data = (weights.take(sel) * data.take(sel, axis=0)).std(axis=0)
            elif data.ndim == 2:
                avg_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
                std_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
            elif data.ndim == 3:
                avg_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
                std_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
            else:
                raise ValueError, 'monthly_avg takes 1, 2, or 3d arrays only'
        elif len(weights) == 1:
            avg_data = data[0]
            std_data = 0.0
        else:
            dd = ddnext
            continue  # next month; without advancing dd the loop would never terminate
        mm.append(avg_data)
        ss.append(std_data)
        tt.append(datetime(dd.year, dd.month, 15))
        dd = ddnext
    mm = array(mm).squeeze()
    ss = array(ss).squeeze()
    time = tt
    if len(tt) == 1:
        mm = mm.reshape(1, *mm.shape)
        ss = ss.reshape(1, *ss.shape)
    if sdev:
        return time, mm, ss
    else:
        return time, mm
def season_avg(time, data, sdev=False):
    """ make seasonal (3-month) average from arrays of time and data"""
    seas = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]

    mm = []
    ss = []
    tt = []
    dd = time[0]
    ed = time[-1]
    while dd <= ed:
        ddmid = nextmonth(dd)
        ddnext = nextmonth(nextmonth(nextmonth(dd)))
        weights = in_interval(dd, ddnext, time)
        if len(weights) > 1:
            weights = array(weights)
            if weights.sum() > 0.0:
                weights = weights / weights.sum()
            if weights.shape[0] != data.shape[0]:
                raise ValueError, 'season_avg has wrongly shaped weights (%d) for data of (%d)' % (weights.shape[0], data.shape[0])
            sel = (weights != 0.0).nonzero()[0]
            #print sel, array(time).take(sel), dd, nextmonth(dd)
            if data.ndim == 1:
                avg_data = (weights.take(sel) * data.take(sel, axis=0)).sum(axis=0)
                std_data = (weights.take(sel) * data.take(sel, axis=0)).std(axis=0)
            elif data.ndim == 2:
                avg_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
                std_data = (weights.take(sel)[:, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
            elif data.ndim == 3:
                avg_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).sum(axis=0).squeeze()
                std_data = (weights.take(sel)[:, newaxis, newaxis] * data.take(sel, axis=0)).std(axis=0).squeeze()
            else:
                raise ValueError, 'season_avg takes 1, 2, or 3d arrays only'
        elif len(weights) == 1:
            avg_data = data[0]
            std_data = 0.0
        else:
            dd = ddnext
            continue  # next season; without advancing dd the loop would never terminate
        mm.append(avg_data)
        ss.append(std_data)
        tt.append(datetime(ddmid.year, ddmid.month, 15))
        dd = ddnext
    mm = array(mm).squeeze()
    ss = array(ss).squeeze()
    time = tt
    if len(tt) == 1:
        mm = mm.reshape(1, *mm.shape)
        ss = ss.reshape(1, *ss.shape)
    if sdev:
        return time, mm, ss
    else:
        return time, mm
def longterm_avg(time, data):
    """ Create long term mean """
    time_avg = num2date(date2num(time).mean())
    data_avg = data.mean(axis=0)
    return time_avg, data_avg
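# Example (illustrative): averaging synthetic daily data into monthly means;
# each sample's value equals its month, so the averages come out 1, 2, and 3.
if __name__ == "__main__":
    days = timegen(datetime(2005, 1, 1), datetime(2005, 3, 31), timedelta(days=1))
    vals = array([float(d.month) for d in days])
    t, avg = monthly_avg(days, vals)
    print t
    print avg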
if __name__ == '__main__':
    #print monthgen(datetime(2000,1,1), datetime(2006,5,1))
    dd = datetime(2002, 3, 1)
    print nextmonth(dd), dd
#!/usr/bin/env python
import os
import sys
sys.path.append('../../')
rootdir = os.getcwd().split('da/')[0]
analysisdir = os.path.join(rootdir,'da/analysis')
from string import join, split
from numpy import array, identity, zeros, arange
import da.tools.io4 as io
# Get masks of different region definitions
matrix_file = os.path.join(analysisdir,'regions.nc')
cdf_temp = io.CT_CDF(matrix_file,'read')
transcommask = cdf_temp.GetVariable('transcom_regions')
olson240mask = cdf_temp.GetVariable('regions')
olsonmask = cdf_temp.GetVariable('land_ecosystems')
oifmask = cdf_temp.GetVariable('ocean_regions')
dummy = cdf_temp.close()
# Names and short names of TransCom regions
transshort = []
transnams = []
transland = []
temp = open(os.path.join(analysisdir,'t3_region_names'),'r').readlines()
for line in temp:
    items = line.split()
    if items:
        num, abbr, name = (items[0], items[1], join(items[2:]),)
        transnams.append(name.strip('"'))
        transshort.append(abbr)
        if abbr.startswith('T'):
            transland.append(name.strip('"'))
transnams.append("Non-optimized")
transshort.append("I-NNOP")
# Names and short names of Olson regions
olsonnams = []
olsonshort = []
temp = open(os.path.join(analysisdir,'olson19_region_names'),'r').readlines()
for line in temp:
    items = line.split()
    if items:
        num, abbr, name = (items[0], items[1], join(items[2:]),)
        olsonnams.append(name.strip('"'))
        olsonshort.append(abbr)
ext_transnams = []
ext_transshort = []
ext_transcomps = []
# Get names of aggregated regions for post aggregation
matrix_file = os.path.join(analysisdir,'postagg_definitions.nc')
cdf_temp = io.CT_CDF(matrix_file,'read')
xform = cdf_temp.GetVariable('xform')
keys = cdf_temp.ncattrs()
keys.sort()
for k in keys:
    if 'longname' in k:
        ext_transnams.append(getattr(cdf_temp, k))
    if 'shortname' in k:
        ext_transshort.append(getattr(cdf_temp, k))
    if 'component' in k:
        ext_transcomps.append(map(int, getattr(cdf_temp, k).split(',')))
dummy = cdf_temp.close()
# Names of the ocean inversion flux regions, to go along with oifmask
oifnams=['(1) NOCN Arctic Ocean',\
'(2) NAH North Atlantic (49 - 76N)',\
'(3) NAM North Atlantic (36 - 49N)',\
'(4) NAL North Atlantic (18 - 36N)',\
'(5) NAT North Atlantic ( 0 - 18N)',\
'(6) SAT South Atlantic ( 0 - 18S)',\
'(7) SAL South Atlantic (18 - 31S)',\
'(8) SAM South Atlantic (31 - 44S)',\
'(9) SAH South Atlantic (44 - 58S)',\
'(10) SOCN Southern Ocean (S of 58S)',\
'(11) NPHW North Pacific (N of 49N, W of 195E)',\
'(12) NPHE North Pacific (N of 36N, E of 195E)',\
'(13) NPK North Pacific (Kuroshio Extension)',\
'(14) NPLW North Pacific (18N - K.Ext, W of 195E)',\
'(15) NPLE North Pacific (18 - 36N, E of 195E)',\
'(16) NPTW North Pacific ( 0 - 18N, W of 199E)',\
'(17) NPTE North Pacific ( 0 - 18N, E of 199E)',\
'(18) SPTW South Pacific ( 0 - 18S, W of 199E)',\
'(19) SPTE South Pacific ( 0 - 18S, E of 199E)',\
'(20) SPLW South Pacific (18 - 31S, W of 233E)',\
'(21) SPLE South Pacific (18 - 31S, E of 233E)',\
'(22) SPMW South Pacific (31 - 44S, W of 248E)',\
'(23) SPME South Pacific (31 - 44S, E of 248E, W of 278E)',\
'(24) SPMC South Pacific (31 - 44S, coastal E of 278E)',\
'(25) SPH South Pacific (44 - 58S) ',\
'(26) NI North Indian',\
'(27) SIT South Indian (0 - 18S)',\
'(28) SIL South Indian (18 - 31S)',\
'(29) SIM South Indian (31 - 44S)',\
'(30) SIH South Indian (44 - 58S)']
oiflocs=[ (200, 80,),\
(330, 55,),\
(330, 40,),\
(330, 22,),\
(330, 8,),\
(350,-12,),\
(350,-27,),\
(350,-40,),\
(350,-53,),\
(200,-70,),\
(178, 54,),\
(210, 40,),\
(165, 38,),\
(178, 25,),\
(215, 25,),\
(170, 8,),\
(230, 8,),\
(175,-10,),\
(240,-10,),\
(195,-27,),\
(265,-27,),\
(195,-40,),\
(262,-40,),\
(283,-40,),\
(220,-53,),\
(68, 8,),\
(75, -10,),\
(75, -27,),\
(75,-40,),\
(75, -53,)]
translocs=[ (-177,0), \
(-92,53,),\
(-108,34,),\
(-66,4,),\
(-50,-17,),\
(15,17,),\
(26,-12,),\
(84,63,),\
(103,30,),\
(115,0,),\
(132,-25,),\
(9,50,),\
(-174,46,),\
(136,6,),\
(-108,6,),\
(-123,-15,),\
(-32,58,),\
(-32,38,),\
(-32,0,),\
(-32,-38,),\
(-14,-65,),\
(68,2,)]
#olsonshort=[str(name.split()[1:2]).join(' ') for name in olsonnams]
old_olsonshort=[join(split(name,' ')[1:2],' ') for name in olsonnams]
olsonlabs=['Conifer Forest','Broadleaf Forest','Mixed Forest','Grass/Shrub','Tropical Forest','Scrub/Woods','Semitundra','Fields/Woods/\nSavanna',\
'Northern Taiga','Forest/Field','Wetland','Deserts','Shrub/Tree/\nSuc ','Crops','Conifer\n Snowy/Coastal',\
'Wooded tundra','Mangrove','Ice and \nPolar desert','Water']
ecmwfnams=[ ' 1 CRPSMF Crops, mixed farming',\
' 2 SHGRSS Short Grass',\
' 3 EVNDLF Evergreen Needleleaf',\
' 4 DECNDLF Deciduous Needleleaf',\
' 5 EVBRDLF Evergreen Broadleaf',\
' 6 DECBRLF Deciduous Broadleaf',\
' 7 TLGRSS Tall Grass',\
' 8 DES Desert',\
' 9 TDR Tundra',\
'10 IRRCR Irrigated Crops',\
'11 SMDES Semidesert',\
'12 ICE Ice Caps',\
'13 BGM Bogs and Marches',\
'14 INW Inland Water',\
'15 OCE Ocean',\
'16 EVSHRB Evergreen Shrubs',\
'17 DECSHR Deciduous shrubs',\
'18 MXFRST Mixed Forest',\
'19 INTFRST Interrupted Forest']
# the short name is the second whitespace-separated field of each entry;
# the original str(...).join(' ') expression produced only blank strings
ecmwfshort = [name.split()[1] for name in ecmwfnams]
ecmwflabs=['Crops, mixed farming','Short Grass','Evergreen Needleleaf','Deciduous Needleleaf','Evergreen Broadleaf',\
'Deciduous Broadleaf','Tall Grass','Desert',\
'Tundra','Irrigated Crops','Semidesert','Ice Caps','Bogs and Marches','Inland Water','Ocean',\
'Evergreen Shrubs','Deciduous shrubs','Mixed Forest','Interrupted Forest']
a =array([\
0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,\
0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,0 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,\
0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,0 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,\
0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,\
1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,\
0 ,0 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,\
0 ,0 ,0 ,0 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,\
0 ,0 ,0 ,0 ,0 ,0 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,\
0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,0 ,0 ,0 ,0 ,1 ,\
0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,1 ,0 ,0 ,0 ,\
0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,1 ,0])
O30to11 = a.reshape(11,30).transpose()
O11to11 = identity(11)
ntcland = 11 # TC standard
ntcocean = 11 # TC standard
Ols259_to_TC23 = zeros((259,23),float)
Ols240_to_TC23 = zeros((240,23),float)
Ols221_to_TC23 = zeros((221,23),float)
for i in arange(ntcland):
    Ols259_to_TC23[i * 19:(i + 1) * 19, i] = 1.0
Ols259_to_TC23[190:228, 10] = 1.0  # Europe
Ols259_to_TC23[228:258, 11:22] = O30to11

for i in arange(ntcland):
    Ols240_to_TC23[i * 19:(i + 1) * 19, i] = 1.0
Ols240_to_TC23[209:239, 11:22] = O30to11

for i in arange(ntcland):
    Ols221_to_TC23[i * 19:(i + 1) * 19, i] = 1.0
Ols221_to_TC23[209:220, 11:22] = O11to11

Ols221_to_TC23[220, 22] = 1.0
Ols240_to_TC23[239, 22] = 1.0
Ols259_to_TC23[258, 22] = 1.0
ExtendedTCRegionsFile='postagg_definitions.nc'
def StateToTranscom(runinfo, x):
    """ convert to transcom shaped regions"""
    from numpy import dot

    try:
        nparams = runinfo.nparameters
    except AttributeError:          # allow passing nparameters directly
        nparams = runinfo

    if nparams == 240:
        M = Ols240_to_TC23
    elif nparams == 221:
        M = Ols221_to_TC23
    elif nparams == 259:
        M = Ols259_to_TC23
    else:
        raise ValueError('Do not know how to convert %s regions to 23 transcom regions' % (nparams,))

    return dot(array(x).squeeze(), M)
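# Example (illustrative): aggregating a uniform 240-element state vector to the
# 23 TransCom regions; the number of parameters may also be passed in directly.
if __name__ == "__main__":
    from numpy import ones
    print StateToTranscom(240, ones(240))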
def StateCovToTranscom(runinfo, p):
    """ convert error covariance to transcom shaped regions"""
    from numpy import dot, transpose   # these names were previously missing here

    try:
        nparams = runinfo.nparameters
    except AttributeError:             # allow passing nparameters directly
        nparams = runinfo

    if nparams == 240:
        M = Ols240_to_TC23
    elif nparams == 221:
        M = Ols221_to_TC23
    elif nparams == 259:
        M = Ols259_to_TC23
    else:
        raise ValueError('Do not know how to convert %s regions to 23 transcom regions' % (nparams,))

    return transpose(dot(dot(transpose(M), p), M), (1, 0, 2))
def ExtendedTCRegions(data, cov=False):
    """ convert to extended transcom shaped regions"""
    from numpy import dot, transpose

    nparams = data.shape[-1]
    if nparams != 23:
        raise ValueError('Do not know how to convert %s regions to 37 extended transcom regions' % (nparams,))
    M = xform   # read above from postagg_definitions.nc

    if not cov:
        return dot(array(data).squeeze(), M)
    else:
        return transpose(dot(transpose(M), dot(array(data).squeeze(), M)), (1, 0, 2))
def cov2corr(A):
    """ convert a covariance matrix into a correlation matrix """
    from numpy import sqrt, dot, newaxis   # these names are not in the module-level imports

    b = 1. / sqrt(A.diagonal())
    return A * dot(b[:, newaxis], b[newaxis, :])
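# Example (illustrative): a 2x2 covariance matrix becomes a correlation matrix
# with ones on the diagonal and 1/6 off-diagonal.
if __name__ == "__main__":
    P = array([[4.0, 1.0], [1.0, 9.0]])
    print cov2corr(P)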
def map_to_tc(data):
    """ function projects 1x1 degree map onto TransCom regions by adding gridboxes over larger areas """
    from hdf2field import Sds2field
    import cPickle
    import os
    from plottools import rebin

    transcommapfile = 'tc_land11_oif30.hdf'
    transcomconversionfile = 'map_to_tc.pickle'
    try:
        regionselect = cPickle.load(open(transcomconversionfile, 'rb'))
    except:
        # read map from NetCDF
        print '[in map_to_tc() in tctools.py:] ' + \
              'Creating conversion map and pickle file for future quick use, patience please...'
        tcmap = Sds2field(transcommapfile, 'tc_region')

        # create dictionary for region <-> map conversions based on 1x1 map
        regs = {}
        nregions = tcmap.max()
        for r in arange(1, nregions + 1):
            sel = (tcmap.flat == r).nonzero()
            if len(sel[0]) > 0:
                regs[r] = sel
        regionselect = regs
        dummy = cPickle.dump(regionselect, open(transcomconversionfile, 'wb'), -1)

    result = zeros(len(regionselect.keys()), float)
    for k, v in regionselect.iteritems():
        result[k - 1] = data.ravel().take(v).sum()
    return result
def LookUpName(rundat, reg=33, tc=False, eco=False, olson=False, longnames=False):
    """ return name of region number reg """
    if longnames:
        econames = olsonnams
    else:
        econames = olsonshort

    if tc:
        return (transnams[reg - 1],)
    elif eco:
        if reg > rundat.nparameters:   # was 'npparameters', a typo
            raise IOError, 'Region number exceeds definitions'
        elif reg > rundat.n_land and reg != rundat.nparameters:
            ret = ('Ocean', oifnams[reg - rundat.n_land - 1])
        elif reg > 209 and reg <= rundat.n_land:
            ret = ('Europe', econames[(reg - 1) % 19] + "_East")
        elif reg == rundat.nparameters:
            ret = (transnams[-1])
        else:
            ret = (transnams[(reg - 1) / 19], econames[(reg - 1) % 19])
        return ret
    elif olson:
        return (econames[(reg - 1) % 19],)
if __name__ == '__main__':
    print transnams
    print transshort
    print ext_transnams
    print ext_transshort
    print olsonnams
    print olsonshort
    print ext_transcomps
#!/usr/bin/env python
# control.py
"""
.. module:: dasystem
.. moduleauthor:: Wouter Peters
Revision History:
File created on 26 Aug 2010.
The DaSystem class is found in the module :mod:`dasystem`, or in a specific implementation under the da/ source tree. It is derived from the standard python :class:`dictionary` object.

It describes the details of the data assimilation system used (i.e., CarbonTracker, or CT Methane, or ....) ::

    datadir         : /Volumes/Storage/CO2/carbontracker/input/ct08/   ! The directory where input data is found
    obs.input.dir   : ${datadir}/obsnc/with_fillvalue                  ! the observation input dir
    obs.input.fname : obs_forecast.nc                                  ! the observation input file
    ocn.covariance  : ${datadir}/oif_p3_era40.dpco2.2000.01.hdf        ! the ocean flux covariance file
    bio.covariance  : ${datadir}/covariance_bio_olson19.nc             ! the biosphere flux covariance file
    deltaco2.prefix : oif_p3_era40.dpco2                               ! the type of ocean product used
    regtype         : olson19_oif30                                    ! the ecoregion definitions
    nparameters     : 240                                              ! the number of parameters to solve for
    random.seed     : 4385                                             ! the random seed for the first cycle
    regionsfile     : transcom_olson19_oif30.hdf                       ! the ecoregion definition mask file

    ! Info on the sites file used
    obs.sites.rc    : ${datadir}/sites_and_weights_co2.ct10.rc         ! the weights in the covariance matrix of each obs
The full baseclass description:
.. autoclass:: da.baseclasses.dasystem.DaSystem
   :members:
"""
import os
import sys
import logging
import datetime
################### Begin Class DaSystem ###################

class DaSystem(dict):
    """
    Information on the data assimilation system used. This is normally an rc-file with settings.
    """

    def __init__(self, rcfilename):
        """
        Initialization occurs from the passed rc-file name; items in the rc-file will be added
        to the dictionary
        """
        self.Identifier = 'CarbonTracker CO2'  # the identifier gives the platform name
        self.LoadRc(rcfilename)

        msg = 'Data Assimilation System initialized: %s' % self.Identifier ; logging.debug(msg)

    def __str__(self):
        """
        String representation of a DaInfo object
        """
        msg = "===============================================================" ; print msg
        msg = "DA System Info rc-file is %s" % self.RcFileName ; print msg
        msg = "===============================================================" ; print msg
        return ""

    def LoadRc(self, RcFileName):
        """
        This method loads a DA System Info rc-file with settings for this simulation
        """
        import da.tools.rc as rc

        for k, v in rc.read(RcFileName).iteritems():
            self[k] = v
        self.RcFileName = RcFileName
        self.DaRcLoaded = True

        msg = 'DA System Info rc-file (%s) loaded successfully' % self.RcFileName ; logging.debug(msg)
        return True

    def Initialize(self):
        """
        Initialize the object.
        """

    def Validate(self):
        """
        Validate the contents of the rc-file given a dictionary of required keys
        """
        needed_rc_items = {}

        for k, v in self.iteritems():
            if v == 'True':
                self[k] = True
            if v == 'False':
                self[k] = False

        for key in needed_rc_items:
            if not self.has_key(key):
                msg = 'Missing a required value in rc-file : %s' % key
                logging.error(msg)
                raise IOError, msg

        msg = 'DA System Info settings have been validated successfully' ; logging.debug(msg)
        return None

################### End Class DaSystem ###################
if __name__ == "__main__":
    pass
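# Usage sketch (hypothetical rc filename; requires the da package on the path):
# the DaSystem behaves as a dictionary filled from the rc-file, and Validate()
# normalizes boolean strings.
if __name__ == "__main__":
    dasys = DaSystem('da.system.rc')   # hypothetical rc-file with keys as above
    dasys.Validate()
    print dasys['obs.input.dir']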
#!/usr/bin/env python
# obs.py
"""
.. module:: obs
.. moduleauthor:: Wouter Peters
Revision History:
File created on 28 Jul 2010.
.. autoclass:: da.baseclasses.obs.Observation
   :members: Initialize, Validate, AddObs, AddSimulations, AddModelDataMismatch, WriteSampleInfo

.. autoclass:: da.baseclasses.obs.ObservationList
   :members: __init__
"""
import os
import sys
import logging
import datetime
from numpy import array, ndarray
identifier = 'Observation baseclass'
version = '0.0'
################### Begin Class Observation ###################

class Observation(object):
    """
    The baseclass Observation is a generic object that provides a number of methods required for any type of observations used in
    a data assimilation system. These methods are called from the CarbonTracker pipeline.

    .. note:: Most of the actual functionality will need to be provided through a derived Observations class with the methods
              below overwritten. Writing your own derived class for Observations is one of the first tasks you'll likely
              perform when extending or modifying the CarbonTracker Data Assimilation Shell.

    Upon initialization of the class, an object is created that holds no actual data, but has a placeholder attribute `self.Data`
    which is an empty list of type :class:`~da.baseclasses.obs.ObservationList`. An ObservationList object is created when the
    method :meth:`~da.baseclasses.obs.Observation.AddObs` is invoked in the pipeline.

    From the list of observations, a file is written by method
    :meth:`~da.baseclasses.obs.Observation.WriteSampleInfo`
    with the sample info needed by the
    :class:`~da.baseclasses.observationoperator.ObservationOperator` object. The values returned after sampling
    are finally added by :meth:`~da.baseclasses.obs.Observation.AddSimulations`
    """

    def __init__(self, DaCycle=None):
        """
        create an object with an identifier, version, and an empty ObservationList
        """
        self.Identifier = self.getid()
        self.Version = self.getversion()
        self.Data = ObservationList([])  # Initialize with an empty list of obs

        # The following code allows the object to be initialized with a DaCycle object already present. Otherwise, it can
        # be added at a later moment.
        if DaCycle is not None:
            self.DaCycle = DaCycle
        else:
            self.DaCycle = {}

        msg = 'Observation object initialized: %s' % self.Identifier ; logging.info(msg)
    def getid(self):
        """
        Return the identifier of the Observation Object, used for logging purposes
        """
        return identifier

    def getversion(self):
        """
        Return the version of the Observation Object, used for logging purposes
        """
        return version   # was 'identifier', which returned the wrong attribute

    def __str__(self):
        """ Prints a list of Observation objects"""
        return "This is a %s object, version %s" % (self.Identifier, self.Version)

    def Initialize(self):
        """ Perform all steps needed to start working with observational data: this can include moving data, concatenating files,
            selecting datasets, etc.
        """

    def Validate(self):
        """ Make sure that data needed for the ObservationOperator (such as observation input lists, or parameter files)
            are present.
        """

    def AddObs(self):
        """
        Add actual observation data to the Observation object. This is in the form of an
        :class:`~da.baseclasses.obs.ObservationList` that is contained in self.Data. The
        list has as its only requirement that it can return the observed+simulated values
        through the method :meth:`~da.baseclasses.obs.ObservationList.getvalues`
        """

    def AddSimulations(self):
        """ Add the simulation data to the Observation object.
        """

    def AddModelDataMismatch(self):
        """
        Get the model-data mismatch values for this cycle.
        """

    def WriteSampleInfo(self):
        """
        Write the information needed by the observation operator to a file. Return the filename that was written for later use.
        """
################### End Class Observations ###################


################### Begin Class ObservationList ###################

class ObservationList(list):
    """ This is a special type of list that holds observed sample objects. It has methods to extract data from such a list easily """

    def getvalues(self, name, constructor=array):
        result = constructor([getattr(o, name) for o in self])
        if isinstance(result, ndarray):
            return result.squeeze()
        else:
            return result

################### End Class ObservationList ###################
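# Example (illustrative): any object carrying the requested attribute can live
# in an ObservationList; getvalues() gathers one attribute across all samples.
if __name__ == "__main__":
    class Sample(object):
        def __init__(self, obs):
            self.obs = obs

    ol = ObservationList([Sample(1.0), Sample(2.0)])
    print ol.getvalues('obs')   # array([ 1.,  2.])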
#!/usr/bin/env python
# model.py
"""
.. module:: observationoperator
.. moduleauthor:: Wouter Peters
Revision History:
File created on 30 Aug 2010.
"""
import os
import sys
import logging
import datetime
identifier = 'GeneralObservationOperator'
version = '0.0'
################### Begin Class ObservationOperator ###################

class ObservationOperator(object):
    """
    This is a class that defines an ObservationOperator. This object is used to control the sampling of
    a statevector in the ensemble Kalman filter framework. The methods of this class specify which (external) code
    is called to perform the sampling, and which files should be read for input and are written for output.

    The baseclass consists mainly of empty methods that require an application-specific implementation.
    """

    def __init__(self, RcFileName, DaCycle=None):
        """ The instance of an ObservationOperator is application dependent """
        self.Identifier = self.getid()
        self.Version = self.getversion()
        self.RestartFileList = []
        self.outputdir = None  # Needed for opening the samples.nc files created

        dummy = self.LoadRc(RcFileName)  # load the specified rc-file (was 'RcFilename', a NameError);
        dummy = self.ValidateRc()        # LoadRc/ValidateRc must be provided by the derived class

        msg = 'Observation Operator object initialized: %s' % self.Identifier ; logging.info(msg)

        # The following code allows the object to be initialized with a DaCycle object already present. Otherwise, it can
        # be added at a later moment.
        if DaCycle is not None:
            self.DaCycle = DaCycle
        else:
            self.DaCycle = {}

    def getid(self):
        return identifier

    def getversion(self):
        return version

    def GetInitialData(self):
        """ This method places all initial data needed by an ObservationOperator in the proper folder for the model """
        return None

    def __str__(self):
        return "This is a %s object, version %s" % (self.Identifier, self.Version)

    def Initialize(self):
        """ Perform all steps necessary to start the observation operator through a simple Run() call """

    def ValidateInput(self):
        """ Make sure that data needed for the ObservationOperator (such as observation input lists, or parameter files)
            are present.
        """

    def SaveData(self):
        """ Write the data that is needed for a restart or recovery of the Observation Operator to the save directory """

################### End Class ObservationOperator ###################
if __name__ == "__main__":
    pass
This diff is collapsed.
#!/usr/bin/env python
# jobcontrol.py
"""
.. module:: platform
.. moduleauthor:: Wouter Peters
Revision History:
File created on 06 Sep 2010.
The PlatForm class is found in the module :mod:`platform`, or in a specific implementation under the da/source tree.
The platform object holds attributes and methods that allow job control on each specific platform. This includes methods to create and submit jobs, but also to obtain process and/or job ID's. These are needed to control the flow of
the system on each platform.
Typically, every platform needs specific implementations of this object (through inheritance), and you should refer to your specific PlatForm object documentation for details (see *da/platform/*).
.. autoclass:: da.baseclasses.platform.PlatForm
   :members:
   :inherited-members:
"""
import sys
import os
import logging
import subprocess
std_joboptions={'jobname':'test','jobaccount':'co2','jobnodes':'nserial 1','jobshell':'/bin/sh','depends':'','jobtime':'00:30:00'}
class PlatForm(object):
    """
    This specifies platform dependent options under generic object calls. A platform object is used to control and submit jobs
    """

    def __init__(self):
        """
        The init function reports the hard-coded ``Identifier`` and ``Version`` of the PlatForm. Since each new
        computer/user requires their own PlatForm object modifications, the init function is usually overwritten
        in the specific implementation of this class
        """
        self.Identifier = 'iPad'  # the identifier gives the platform name
        self.Version = '1.0'      # the platform version used

        msg1 = '%s object initialized' % self.Identifier ; logging.debug(msg1)
        msg2 = '%s version: %s' % (self.Identifier, self.Version) ; logging.debug(msg2)

    def __str__(self):
        return self.Version
    def GetJobTemplate(self, joboptions={}, block=False):
        """
        Returns the job template for a given computing system, filled with options from the dictionary provided as argument.

        The job template should return the preamble of a job that can be submitted to a queue on your platform;
        examples of popular queuing systems are:
            - SGE
            - MOAB
            - XGrid

        A list of job options can be passed through a dictionary, and each is then filled in on the proper line.
        An example is the dictionary {'account':'co2'}, which will be placed
        after the ``-A`` flag in a ``qsub`` environment.

        An extra option ``block`` has been added that allows the job template to be configured to block the current
        job until the submitted job in this template has been completed fully.
        """
        template = """## \n""" + \
                   """## This is a set of dummy names, to be replaced by values from the dictionary \n""" + \
                   """## Please make your own platform specific template with your own keys and place it in a subfolder of the da package.\n""" + \
                   """## \n""" + \
                   """ \n""" + \
                   """#$ jobname \n""" + \
                   """#$ jobaccount \n""" + \
                   """#$ jobnodes \n""" + \
                   """#$ jobtime \n""" + \
                   """#$ jobshell \n"""

        if 'depends' in joboptions:
            template += """#$ -hold_jid depends \n"""

        # First replace from passed dictionary
        for k, v in joboptions.iteritems():
            while k in template:
                template = template.replace(k, v)

        # Fill remaining values with std_options
        for k, v in std_joboptions.iteritems():
            while k in template:
                template = template.replace(k, v)

        return template
    def GetMyID(self):
        """ Return the process ID, or job ID of the current process or job"""
        return os.getpid()

    def WriteJob(self, jobfile, template, jobid):
        """
        This method writes a jobfile to the exec dir and makes it executable
        """
        #
        # Done, write jobfile
        #
        f = open(jobfile, 'w')
        dummy = f.write(template)
        dummy = f.close()
        dummy = os.chmod(jobfile, 0755)  # rwxr-xr-x; the original decimal 477 was presumably meant as an octal mode
        msg = "A job file was created (%s)" % jobfile ; logging.debug(msg)

        return None
    def SubmitJob(self, jobfile, joblog=None, block=False):
        """
        :param jobfile: a string with the filename of a jobfile to run
        :param joblog:  a string with the filename of a logfile to write run output to
        :param block:   Boolean specifying whether to submit and continue (F), or submit and wait (T)
        :rtype: integer

        This method submits a jobfile to the queue, and returns the job ID
        """
        cmd = ["sh", jobfile]
        msg = "A new task will be started (%s)" % cmd ; logging.info(msg)
        if block:
            jobid = subprocess.call(cmd)
        else:
            jobid = subprocess.Popen(cmd).pid

        # info ...
        infotext = []
        infotext.append('\n')
        infotext.append('Summary:\n')
        infotext.append('\n')
        infotext.append('job script : %s\n' % jobfile)
        infotext.append('job log    : %s\n' % joblog)
        infotext.append('\n')
        infotext.append('To manage this process:\n')
        infotext.append('\n')
        infotext.append('  # kill process:\n')
        infotext.append('  kill %i\n' % jobid)
        infotext.append(' \n')
        infotext.append('\n')

        # write to log:
        for line in infotext:
            logging.info(line.strip())

        return 0

    def KillJob(self, jobid):
        """ This method kills a running job """
        return None

    def StatJob(self, jobid):
        """ This method gets the status of a running job """
        import subprocess

        output = subprocess.Popen(['qstat', jobid], stdout=subprocess.PIPE).communicate()[0] ; logging.info(output)
        return output
if __name__ == "__main__":
    pass
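# Example (illustrative): filling the dummy template keys with job options;
# keys that are not passed fall back to the defaults in std_joboptions.
if __name__ == "__main__":
    p = PlatForm()
    print p.GetJobTemplate({'jobname': 'ct_test', 'jobtime': '01:00:00'})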
This diff is collapsed.