Commit f4af5f85 authored by Auke van der Woude

Merge branch 'Develop' into 'dynamicFFmodel'

Develop

See merge request ctdas/CTDAS!19
parents fc8cdeff be766b72
@@ -2,5 +2,12 @@
*.py
*.rc
*.jb
/da/*/__pycache__
/da/*/*.pyc
joblog
# Exclude the logfiles
*.log
# Exclude files created by the slurm system
slurm*.out
# Exclude compiled files
da/**/__pycache__/
da/**/*.pyc
#da/**/*.bak
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>ctdas_light_refactor_new</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<pydev_project>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/ctdas_light_refactor_new</path>
</pydev_pathproperty>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
</pydev_project>
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# merge_ctdas_runs.py
"""
Author : peters
Revision History:
File created on 14 Jul 2014.
This scrip merges the analysis directory from multiple projects into one new folder.
It steps over existing analysis output files from weekly means, and then averages these to daily/monthy/yearly values.
"""
import datetime as dt
import os
import sys
import shutil
import time_avg_fluxes as tma
basedir = '/Storage/CO2/ingrid/'
basedir2 = '/Storage/CO2/peters/'
targetproject = 'geocarbon-ei-sibcasa-gfed4-zoom-gridded-combined-convec-20011230-20130101'
targetdir = os.path.join(basedir2,targetproject)
sources = {
'2000-01-01 through 2011-12-31': os.path.join(basedir,'carbontracker','geocarbon-ei-sibcasa-gfed4-zoom-gridded-convec-combined'),
'2012-01-01 through 2012-12-31': os.path.join(basedir2,'geocarbon-ei-sibcasa-gfed4-zoom-gridded-convec-20111231-20140101'),
}
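# The keys of 'sources' are 'start through end' date ranges (parsed in the main
# loop below); the values are the roots of the source projects covering them.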
dirs = ['flux1x1','transcom','country','olson']
dacycle = {}
dacycle['time.start'] = dt.datetime(2000,12,30)
dacycle['time.end'] = dt.datetime(2013,1,1)
dacycle['cyclelength'] = dt.timedelta(days=7)
dacycle['dir.analysis'] = os.path.join(targetdir,'analysis')
if __name__ == "__main__":
if not os.path.exists(targetdir):
os.makedirs(targetdir)
if not os.path.exists(os.path.join(targetdir,'analysis')):
os.makedirs(os.path.join(targetdir,'analysis') )
for nam in dirs:
if not os.path.exists(os.path.join(targetdir,'analysis','data_%s_weekly'%nam)):
os.makedirs(os.path.join(targetdir,'analysis','data_%s_weekly'%nam) )
timedirs=[]
for ss,vv in sources.iteritems():
sds,eds = ss.split(' through ')
sd = dt.datetime.strptime(sds,'%Y-%m-%d')
ed = dt.datetime.strptime(eds,'%Y-%m-%d')
timedirs.append([sd,ed,vv])
print sd,ed, vv
while dacycle['time.start'] < dacycle['time.end']:
        # Find the source project whose time range covers the current cycle
        for sdate, edate, srcdir in timedirs:
            if sdate <= dacycle['time.start'] <= edate:
                indir = srcdir
        # Copy each category of weekly file from the original dir to the new
        # project dir, then time-average the newly added fluxes
        datestr = dacycle['time.start'].strftime('%Y-%m-%d')
        for catname, prefix in [('flux1x1', 'flux_1x1'),
                                ('transcom', 'transcom_fluxes'),
                                ('olson', 'olson_fluxes'),
                                ('country', 'country_fluxes')]:
            infile = os.path.join(indir, 'analysis', 'data_%s_weekly' % catname,
                                  '%s.%s.nc' % (prefix, datestr))
            shutil.copy(infile, infile.replace(indir, targetdir))
            tma.time_avg(dacycle, avg=catname)
        dacycle['time.start'] += dacycle['cyclelength']
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# time_avg_fluxes.py
"""
Author : peters
Revision History:
File created on 20 Dec 2012.
"""
import os
import sys
sys.path.append('../../')
import shutil
import datetime
import subprocess
from dateutil.relativedelta import relativedelta
def time_avg(dacycle,avg='transcom'):
""" Function to create a set of averaged files in a folder, needed to make longer term means """
    if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
        raise IOError('Invalid choice of averaging: %s' % avg)
    analysisdir = dacycle['dir.analysis']
    if not os.path.exists(analysisdir):
        raise IOError('Requested analysis dir (%s) does not exist, exiting...' % analysisdir)
daily_avg(dacycle,avg)
monthly_avg(dacycle,avg)
yearly_avg(dacycle,avg)
longterm_avg(dacycle,avg)
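# A minimal standalone usage sketch (hypothetical paths; assumes the analysis
# tree with its data_<avg>_weekly files already exists, and a dacycle-like dict
# carrying the keys the averaging functions read):
#
#   import datetime
#   dacycle = {'dir.analysis': '/scratch/ctdas/analysis',
#              'time.start': datetime.datetime(2001, 1, 6),
#              'cyclelength': datetime.timedelta(days=7)}
#   time_avg(dacycle, avg='transcom')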
def new_month(dacycle):
""" check whether we just entered a new month"""
this_month = dacycle['time.start'].month
prev_month = (dacycle['time.start']-dacycle['cyclelength']).month
return (this_month != prev_month)
def new_year(dacycle):
""" check whether we just entered a new year"""
this_year = dacycle['time.start'].year
prev_year = (dacycle['time.start']-dacycle['cyclelength']).year
return (this_year != prev_year)
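# Example: with cyclelength = 7 days and time.start = 2001-02-03, the previous
# cycle started on 2001-01-27, so new_month() is True and new_year() is False.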
def daily_avg(dacycle,avg):
""" Function to create a set of daily files in a folder, needed to make longer term means """
    if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
        raise IOError('Invalid choice of averaging: %s' % avg)
analysisdir = dacycle['dir.analysis']
weekdir = os.path.join(analysisdir , 'data_%s_weekly'%avg)
daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)
if not os.path.exists(daydir):
print "Creating new output directory " + daydir
os.makedirs(daydir)
files = os.listdir(weekdir)
files = [f for f in files if '-' in f and f.endswith('.nc')]
fileinfo = {}
for filename in files:
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')
fileinfo[filename] = date
dt = dacycle['cyclelength']
for k,v in fileinfo.iteritems():
cycle_file = os.path.join(weekdir,k)
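        # each weekly file covers 'cyclelength' days; symlink it once per day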
for i in range(abs(dt.days)):
daily_file = os.path.join(daydir,'%s_fluxes.%s.nc'%(avg,(v+datetime.timedelta(days=i)).strftime('%Y-%m-%d')))
if not os.path.lexists(daily_file):
os.symlink(cycle_file,daily_file)
#print daily_file,cycle_file
def monthly_avg(dacycle,avg):
""" Function to average a set of files in a folder from daily to monthly means """
    if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
        raise IOError('Invalid choice of averaging: %s' % avg)
analysisdir = dacycle['dir.analysis']
daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)
monthdir = os.path.join(analysisdir,'data_%s_monthly'%avg)
if not os.path.exists(monthdir):
print "Creating new output directory " + monthdir
os.makedirs(monthdir)
files = os.listdir(daydir) # get daily files
files = [f for f in files if '-' in f and f.endswith('.nc')]
if len(files) < 28:
print 'No month is yet complete, skipping monthly average'
return
fileinfo = {}
for filename in files: # parse date from each of them
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')
fileinfo[filename] = date
    years = [d.year for d in fileinfo.values()] # get the actual years present
sd = datetime.datetime(min(years),1,1)
ed = datetime.datetime(max(years)+1,1,1)
while sd < ed:
nd = sd + relativedelta(months=+1)
ndays_in_month = (nd-sd).days
avg_files = [os.path.join(daydir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]
        if len(avg_files) == ndays_in_month: # only average once the month is complete
            targetfile = os.path.join(monthdir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y-%m')))
            if not os.path.exists(targetfile):
                print "New month (%02d) is complete, I have %d days for the next file"%(sd.month,ndays_in_month)
                # ncra (the NCO record averager) averages the daily files into one monthly file
                command = ['ncra','-O']+ avg_files + [targetfile]
                status = subprocess.check_call(command)
sd = nd
def yearly_avg(dacycle,avg):
""" Function to average a set of files in a folder from monthly to yearly means """
    if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
        raise IOError('Invalid choice of averaging: %s' % avg)
analysisdir = dacycle['dir.analysis']
monthdir = os.path.join(analysisdir , 'data_%s_monthly'%avg )
yeardir = os.path.join(analysisdir,'data_%s_yearly'%avg)
if not os.path.exists(yeardir):
print "Creating new output directory " + yeardir
os.makedirs(yeardir)
files = os.listdir(monthdir) # get monthly files
files = [f for f in files if '-' in f and f.endswith('.nc')]
if not files:
print "No full year finished yet, skipping yearly average..."
return
fileinfo = {}
for filename in files:
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m')
fileinfo[filename] = date
years = set([d.year for d in fileinfo.values()])
sd = datetime.datetime(min(years),1,1)
ed = datetime.datetime(max(years)+1,1,1)
while sd < ed:
nd = sd + relativedelta(years=+1)
avg_files = [os.path.join(monthdir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]
        if len(avg_files) != 12:
print "Year %04d not finished yet, skipping yearly average..."%sd.year
else:
targetfile = os.path.join(yeardir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y')))
if not os.path.exists(targetfile):
print "Year %04d is complete, I have 12 months for the next file"%sd.year
command = ['ncra','-O']+ avg_files + [targetfile]
status = subprocess.check_call(command)
sd = nd
def longterm_avg(dacycle,avg):
""" Function to average a set of files in a folder from monthly to yearly means """
if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:
raise IOError,'Choice of averaging invalid'
analysisdir = dacycle['dir.analysis']
yeardir = os.path.join(analysisdir , 'data_%s_yearly'%avg )
longtermdir = os.path.join(analysisdir,'data_%s_longterm'%avg)
if not os.path.exists(longtermdir):
print "Creating new output directory " + longtermdir
os.makedirs(longtermdir)
files = os.listdir(yeardir)
    files = [f for f in files if f.endswith('.nc')]  # yearly files carry no '-' in their date stamp
if not files:
print "No full year finished yet, skipping longterm average..."
return
dates = []
for filename in files:
date=datetime.datetime.strptime(filename.split('.')[-2],'%Y')
dates.append( date )
avg_files = [os.path.join(yeardir,k) for k in files]
    if avg_files:
command = ['ncra','-O']+ avg_files + [os.path.join(longtermdir,'%s_fluxes.%04d-%04d.nc'%(avg,min(dates).year, max(dates).year))]
status = subprocess.check_call(command)
if __name__ == "__main__":
from da.tools.initexit import CycleControl
sys.path.append('../../')
dacycle = CycleControl(args={'rc':'../../ctdas-ei-nobcb-zoom-ecoregions.rc'})
dacycle.setup()
dacycle.parse_times()
while dacycle['time.end'] < dacycle['time.finish']:
time_avg(dacycle,avg='flux1x1')
time_avg(dacycle,avg='transcom')
time_avg(dacycle,avg='transcom_extended')
time_avg(dacycle,avg='olson')
time_avg(dacycle,avg='olson_extended')
time_avg(dacycle,avg='country')
dacycle.advance_cycle_times()
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# tools_country.py
"""
Author : peters
Revision History:
File created on 25 Jul 2008.
This module provides an interface to go from arrays of gridded data to aggregates for countries.
It uses the country information from the UNEP database, and the database created by them for this purpose. See:
http://na.unep.net/metadata/unep/GRID/GRIDCTRY.html
The module sets up a class 'countryinfo' that holds a number of attributes. These attributes can be used to
extract gridded information about the country. A built-in method agg_1x1 is provided for convenience.
The routine get_countrydict() creates a dictionary with this information for a large number of countries.
CAUTION:
The country data only cover the land area of a nation, so aggregation will exclude the fraction of a cell covered
by oceans, seas, or open water. The aggregation will thus work best on arrays that are *not* defined per unit area!
"""
import sys
import os
import cPickle
sys.path.append('../../')
rootdir = os.getcwd().split('da/')[0]
analysisdir = os.path.join(rootdir, 'da/analysis')
from numpy import sum, array
try:
    from dbfpy import dbf
except ImportError:
    print "the python DBF lib might be needed, please install from:"
    print "http://dbfpy.sourceforge.net/"
    print "Trying to complete anyway..."
from da.analysis.tools_regions import globarea
EU25 = ["Austria", "Belgium", "Cyprus", "Czech Republic", "Denmark", "Estonia", "Finland", "France", "Germany", "Greece", "Hungary", "Ireland", "Italy", "Latvia", "Lithuania", "Luxembourg", "Malta", "Netherlands", "Poland", "Portugal", "Slovakia", "Slovenia", "Spain", "Sweden", "United Kingdom"]
EU27 = ["Austria", "Belgium", "Bulgaria", "Cyprus", "Czech Republic", "Denmark", "Estonia", "Finland", "France", "Germany", "Greece", "Hungary", "Ireland", "Italy", "Latvia", "Lithuania", "Luxembourg", "Malta", "Netherlands", "Poland", "Portugal", "Romania", "Slovakia", "Slovenia", "Spain", "Sweden", "United Kingdom"]
G8 = [ "France", "Germany", "Italy", "Japan", "United Kingdom", "United States"]
annex1 = [ "Australia", "Austria", "Belarus", "Belgium", "Bulgaria", "Canada", "Croatia", "Czech Republic", "Denmark", "European Union", "Estonia", "Finland", "France", "Germany", "Greece", "Hungary", "Iceland", "Ireland", "Italy", "Japan", "Latvia", "Liechtenstein", "Lithuania", "Luxembourg", "Monaco", "Netherlands", "New Zealand", "Norway", "Poland", "Portugal", "Romania", "Russia", "Slovakia", "Slovenia", "Spain", "Sweden", "Switzerland", "Turkey", "Ukraine", "United Kingdom", "United States"]
annex2 = [ "Australia", "Austria", "Belgium", "Canada", "Denmark", "Finland", "France", "Germany", "Greece", "Iceland", "Ireland", "Italy", "Japan", "Luxembourg", "Netherlands", "New Zealand", "Norway", "Portugal", "Spain", "Sweden", "Switzerland", "Turkey", "United Kingdom", "United States"]
class countryinfo(object):
""" Defines the information about 1x1 gridboxes covering each country """
def __init__(self, code):
self.country = code
self.ngrids = 0
self.gridij = []
self.gridnr = []
self.gridcoords = []
self.gridlandmask = []
self.gridsharedocean = []
self.gridsharedborder = []
def add_gridinfo(self, grid_i, grid_j, gridlandmask, shared_border, shared_water):
""" add information from one gridbox to the object """
self.gridij.append((grid_j, grid_i,)) # add tuple with j,i coordinates
lon = -180 + grid_i + 0.5
lat = -90 + grid_j + 0.5
        self.gridcoords.append((lat, lon,)) # add tuple with lat,lon coordinates
self.gridnr.append(grid_j * 360 + grid_i) # add grid number for take() function
self.gridlandmask.append(gridlandmask) # this gives the total fraction of land
self.gridsharedocean.append(shared_water)
self.gridsharedborder.append(shared_border)
self.ngrids = self.ngrids + 1
    def agg_1x1(self, field):
        """ aggregate a 1x1 input field to a country total, weighting each gridbox by its land fraction """
        return (field.take(self.gridnr) * array(self.gridlandmask)).sum()
def __str__(self):
return ' Country name : %s\n' % self.country + \
' Number of gridpoints : %d ' % self.ngrids
#' Gridpoint indices : %s ' % self.gridnr
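# Worked agg_1x1 example: a country covering two gridboxes with land fractions
# [1.0, 0.5] whose field values are [10., 4.] aggregates to 10*1.0 + 4*0.5 = 12.0.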
def fix_eu(rec):
""" fix Czech Republic and Slovakia manually """
alternative_slov = {
'140202': (2.0, 28.1), \
'140201': (2.0, 34.0), \
'140200': (2.0, 44.0), \
'140199': (3.0, 25.5), \
'139203': (3.0, 18.9), \
'139202': (2.0, 57.5), \
'139201': (2.0, 59.7), \
'139200': (2.0, 87.2), \
'139199': (1.0, 100.0), \
'139198': (2.0, 72.8), \
'138198': (3.0, 7.7), \
'138199':(2.0, 10.0) }
alternative_czech = {
'141193': (2.0, 23.0), \
'141194': (2.0, 62.1), \
'141195': (3.0, 89.5), \
'141196': (2.0, 79.4), \
'141197': (2.0, 42.3), \
'141198': (2.0, 24.5), \
'141199': (2.0, 0.1), \
'140193': (2.0, 20.6), \
'140194': (2.0, 88.9), \
'140195': (1.0, 100.0), \
'140196': (1.0, 100.0), \
'140197': (1.0, 100.0), \
'140198': (1.0, 100.0), \
'140199': (3.0, 50.0), \
'139195': (2.0, 70.6), \
'139196': (2.0, 12.4), \
'139197': (2.0, 30.9), \
'139198': (2.0, 25.0) }
    gridid = str(int(rec['GRID']))
    for alternative in [alternative_slov, alternative_czech]:
        if gridid in alternative:
            rec['COVER_ID'] = alternative[gridid][0]
            rec['RATE_IN_GR'] = alternative[gridid][1]
    return rec
def get_countrydict():
    """ Create a dictionary with grid-to-country information from a dbf file, cached as a pickle """
    cachefile = os.path.join(analysisdir, 'country_dictionary.dat')
    try:
        countrydict = cPickle.load(open(cachefile, 'rb'))
    except (IOError, EOFError, cPickle.UnpicklingError):
        db = dbf.Dbf(os.path.join(analysisdir, 'GRIDCTRY.DBF'))
        countrydict = {}
for n, rec in enumerate(db):
code = rec['COUNTRY']
gridid = str(int(rec['GRID']))
if code in ['Czech Republic', 'Slovakia']:
rec = fix_eu(rec)
rate_in_gr = rec['RATE_IN_GR'] * 1.e-2
i = int(gridid[-3::])
j = int(gridid[0:-3])
lat = -91 + j + 0.5
lon = -181 + i + 0.5
if code in countrydict:
a = countrydict[code]
else:
a = countryinfo(code)
shared_border = False
shared_water = False
if rec['COVER_ID'] == 0.0:
shared_border = False
shared_water = True
if rec['COVER_ID'] >= 2.0:
shared_border = True
if rec['COVER_ID'] >= 10.0:
shared_water = True
a.add_gridinfo(i - 1, j - 1, rate_in_gr, shared_border, shared_water)
countrydict[code] = a
db.close()
        cPickle.dump(countrydict, open(cachefile, 'wb'), -1)
return countrydict
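# Note: the pickled country_dictionary.dat acts as a cache; delete it to force
# a rebuild from GRIDCTRY.DBF.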
if __name__ == "__main__":
countrydict = get_countrydict()
area = globarea()
areas = []
for k, v in countrydict.items():
ar = v.agg_1x1(area) / 1.e6
areas.append((ar, k))
areas.sort()
areas.reverse()
for a in areas: print a
v = countrydict['Ocean']
print v.agg_1x1(area)
v = countrydict['Netherlands']