"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters. 
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu. 

This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation, 
version 3. This program is distributed in the hope that it will be useful, but 
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 

You should have received a copy of the GNU General Public License along with this 
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
# merge_ctdas_runs.py

"""
Author : peters 

Revision History:
File created on 14 Jul 2014.

This script merges the analysis directories from multiple projects into one new folder.
It steps over existing analysis output files from weekly means, and then averages these to daily/monthly/yearly values.

"""

import datetime as dt
import os
import sys
import shutil
from . import time_avg_fluxes as tma

basedir = '/Storage/CO2/ingrid/'
basedir2 = '/Storage/CO2/peters/'
targetproject = 'geocarbon-ei-sibcasa-gfed4-zoom-gridded-combined-convec-20011230-20130101'
targetdir = os.path.join(basedir2,targetproject)

sources = {
            '2000-01-01 through 2011-12-31': os.path.join(basedir,'carbontracker','geocarbon-ei-sibcasa-gfed4-zoom-gridded-convec-combined'),
            '2012-01-01 through 2012-12-31': os.path.join(basedir2,'geocarbon-ei-sibcasa-gfed4-zoom-gridded-convec-20111231-20140101'),
            }

dirs = ['flux1x1','transcom','country','olson']

dacycle = {}
dacycle['time.start'] = dt.datetime(2000,12,30)
dacycle['time.end'] = dt.datetime(2013,1,1)
dacycle['cyclelength'] = dt.timedelta(days=7)
dacycle['dir.analysis'] = os.path.join(targetdir,'analysis')


if __name__ == "__main__":

    if not os.path.exists(targetdir):
        os.makedirs(targetdir)
    if not os.path.exists(os.path.join(targetdir,'analysis')):
        os.makedirs(os.path.join(targetdir,'analysis') )
    for nam in dirs:
        if not os.path.exists(os.path.join(targetdir,'analysis','data_%s_weekly'%nam)):
            os.makedirs(os.path.join(targetdir,'analysis','data_%s_weekly'%nam) )

    timedirs=[]
63
    for ss,vv in sources.items():
Peters, Wouter's avatar
Peters, Wouter committed
64
65
66
67
        sds,eds = ss.split(' through ')
        sd = dt.datetime.strptime(sds,'%Y-%m-%d')
        ed = dt.datetime.strptime(eds,'%Y-%m-%d')
        timedirs.append([sd,ed,vv])
68
        print(sd,ed, vv)
Peters, Wouter's avatar
Peters, Wouter committed
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104

    while dacycle['time.start'] < dacycle['time.end']:

        # copy the weekly flux1x1 file from the original dir to the new project dir

        for td in timedirs:
            if dacycle['time.start'] >= td[0] and dacycle['time.start'] <= td[1]:
                indir=td[2]

        # Now time avg new fluxes

        infile = os.path.join(indir,'analysis','data_flux1x1_weekly','flux_1x1.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d') ) ) 
        #print os.path.exists(infile),infile
        shutil.copy(infile,infile.replace(indir,targetdir) )
        tma.time_avg(dacycle,avg='flux1x1')

        infile = os.path.join(indir,'analysis','data_transcom_weekly','transcom_fluxes.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d') ) ) 
        #print os.path.exists(infile),infile
        shutil.copy(infile,infile.replace(indir,targetdir) )
        tma.time_avg(dacycle,avg='transcom')

        infile = os.path.join(indir,'analysis','data_olson_weekly','olson_fluxes.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d') ) ) 
        #print os.path.exists(infile),infile
        shutil.copy(infile,infile.replace(indir,targetdir) )
        tma.time_avg(dacycle,avg='olson')

        infile = os.path.join(indir,'analysis','data_country_weekly','country_fluxes.%s.nc'%(dacycle['time.start'].strftime('%Y-%m-%d') ) ) 
        #print os.path.exists(infile),infile
        shutil.copy(infile,infile.replace(indir,targetdir) )
        tma.time_avg(dacycle,avg='country')

        dacycle['time.start'] += dacycle['cyclelength']