Commit ffec3e5b authored by Auke van der Woude

Merge branch 'feature-STILT'

parents e4fde471 2dbfe839
#!/bin/sh
#$ das.py
#$ co2
#$ nserial 1
#$ 06:30:00
#$ /bin/sh
echo "All output piped to file template.log"
#source /usr/local/Modules/3.2.8/init/sh
#source /opt/intel/bin/ifortvars.sh intel64
export HOST='daint'
#module load python
export icycle_in_job=999
python ctdas-stilt.py rc=ctdas-stilt.rc -v $1 >& ctdas-stilt.log &
Traceback (most recent call last):
  File "ctdas-stilt.py", line 30, in <module>
    from da.platform.cartesius import CartesiusPlatform
  File "/nfs/home5/awoude/CTDAS/da/platform/cartesius.py", line 135
    print 'output', output
                  ^
SyntaxError: Missing parentheses in call to 'print'. Did you mean print('output', output)?
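# The traceback above shows Python 2 print syntax run under a Python 3
# interpreter; as the error message itself suggests, the fix in
# da/platform/cartesius.py is to call print as a function:
#     print('output', output)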
"""CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
updates of the code. See also: http://www.carbontracker.eu.
This program is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation,
version 3. This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program. If not, see <http://www.gnu.org/licenses/>."""
#!/usr/bin/env python
#################################################################################################
# First order of business is always to make all other python modules accessible through the path
#################################################################################################
import sys
import os
import logging
sys.path.append(os.getcwd())
#################################################################################################
# Next, import the tools needed to initialize a data assimilation cycle
#################################################################################################
from da.tools.initexit import start_logger, validate_opts_args, parse_options, CycleControl
from da.tools.pipeline import forward_pipeline, header, footer
from da.platform.cartesius import CartesiusPlatform
from da.baseclasses.dasystem import DaSystem
from da.baseclasses.statevector import StateVector
from da.carbondioxide.obspack_globalviewplus2 import ObsPackObservations
from da.carbondioxide.optimizer import CO2Optimizer
from da.baseclasses.observationoperator import ObservationOperator
#from da.analysis.expand_fluxes import save_weekly_avg_1x1_data, save_weekly_avg_state_data, save_weekly_avg_tc_data, save_weekly_avg_ext_tc_data
#from da.analysis.expand_molefractions import write_mole_fractions
#################################################################################################
# Parse and validate the command line options, start logging
#################################################################################################
start_logger()
opts, args = parse_options()
opts, args = validate_opts_args(opts, args)
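# For reference, the wrapper shell script above starts this driver as:
#     python ctdas-stilt.py rc=ctdas-stilt.rc -v $1
# so opts/args carry the rc-file location and the verbosity flag.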
#################################################################################################
# Create the Cycle Control object for this job
#################################################################################################
dacycle = CycleControl(opts, args)
platform = CartesiusPlatform()
dasystem = DaSystem(dacycle['da.system.rc'])
obsoperator = ObservationOperator(dacycle['da.obsoperator.rc'])
samples = ObsPackObservations()
statevector = StateVector()
optimizer = CO2Optimizer()
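# Each component above is configured through keys in the main rc-file:
# dasystem reads da.system.rc (da/rc/carbontracker_random.rc below) and
# obsoperator reads da.obsoperator.rc (da/stilt/stilt.rc below).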
##########################################################################################
################### ENTER THE PIPELINE WITH THE OBJECTS PASSED BY THE USER ###############
##########################################################################################
logging.info(header + "Entering Pipeline " + footer)
#ensemble_smoother_pipeline(dacycle, platform, dasystem, samples, statevector, obsoperator, optimizer)
forward_pipeline(dacycle, platform, dasystem, samples, statevector, obsoperator, optimizer)
##########################################################################################
################### All done, extra stuff can be added next, such as analysis
##########################################################################################
#logging.info(header + "Starting analysis" + footer)
#sys.exit(0)
#
#save_weekly_avg_1x1_data(dacycle, statevector)
#save_weekly_avg_state_data(dacycle, statevector)
#save_weekly_avg_tc_data(dacycle, statevector)
#save_weekly_avg_ext_tc_data(dacycle)
#write_mole_fractions(dacycle)
#
#sys.exit(0)
! CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
! Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
! updates of the code. See also: http://www.carbontracker.eu.
!
! This program is free software: you can redistribute it and/or modify it under the
! terms of the GNU General Public License as published by the Free Software Foundation,
! version 3. This program is distributed in the hope that it will be useful, but
! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
! FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with this
! program. If not, see <http://www.gnu.org/licenses/>.
! author: Wouter Peters
!
! This is a blueprint for an rc-file used in CTDAS. Feel free to modify it, and please go to the main webpage for further documentation.
!
! Note that rc-files have the convention that commented lines start with an exclamation mark (!), while special lines start with a hashtag (#).
!
! When running the script start_ctdas.sh, this rc-file will be copied to your run directory, and some items will be replaced for you.
! The result will be a nearly ready-to-go rc-file for your assimilation job. The entries and their meaning are explained by the comments below.
!
!
! HISTORY:
!
! Created on August 20th, 2013 by Wouter Peters
!
!
! The time for which to start and end the data assimilation experiment in format YYYY-MM-DD HH:MM:SS
time.start : 2016-01-10 00:00:00
time.finish : 2016-01-15 00:00:00
! Whether to restart the CTDAS system from a previous cycle, or to start the sequence fresh. Valid entries are T/F/True/False/TRUE/FALSE
time.restart : False
! The length of a cycle is given in days, such that the integer 7 denotes the typically used weekly cycle. Valid entries are integers > 1
time.cycle : 7
! The number of cycles of lag to use for a smoother version of CTDAS. CarbonTracker CO2 typically uses 5 weeks of lag. Valid entries are integers > 0
time.nlag : 3
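! For example, with time.cycle : 7 and time.nlag : 3, each optimization covers a moving window of 3 x 7 = 21 days.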
! The directory under which the code, input, and output will be stored. This is the base directory for a run. The placeholder
! value will be replaced through the start_ctdas.sh script by a user-specified folder name. DO NOT REPLACE
dir.da_run : /projects/0/ctdas/awoude
! The resources used to complete the data assimilation experiment. This depends on your computing platform.
! The number of cycles per job denotes how many cycles should be completed before starting a new process or job; this
! allows you to complete many cycles before resubmitting a job to the queue and having to wait again for resources.
! Valid entries are integers > 0
da.resources.ncycles_per_job : 1
! The ntasks key specifies the number of threads to use for the MPI part of the code, if relevant. Note that the CTDAS code
! itself is not parallelized and the python code underlying CTDAS does not use multiple processors. The chosen observation
! operator, however, might use many processors (TM5, for example). Valid entries are integers > 0
da.resources.ntasks : 1
! This specifies the amount of wall-clock time to request for each job. Its value depends on your computing platform and might take
! any form appropriate for your system. Typically, HPC queueing systems allow you a certain number of hours of usage before
! your job is killed, and you are expected to finalize and submit a next job before that time. Valid entries are strings.
da.resources.ntime : 04:00:00
! The resource settings above will cause the creation of a job file in which 1 cycle will be run, and 1 task
! is requested for a duration of 4 hours
!
! Info on the DA system used; this depends on your application of CTDAS and might refer to, for instance, CO2 or CH4 optimizations.
!
da.system : CarbonTracker
! The specific settings for your system are read from a separate rc-file, which points to the data directories, observations, etc
da.system.rc : da/rc/carbontracker_random.rc
! This flag should probably be moved to the da.system.rc file. It denotes which type of filtering to use in the optimizer
da.system.localization : None
! Info on the observation operator to be used; these keys help to identify the settings for the transport model in this case
da.obsoperator : STILT
!
! The STILT transport model is controlled by an rc-file as well. The value below refers to the configuration of the STILT model to
! be used as observation operator in this experiment.
!
da.obsoperator.home : da/stilt
da.obsoperator.rc : ${da.obsoperator.home}/stilt.rc
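! Note that rc-files expand ${key} references, so the value above resolves to da/stilt/stilt.rc.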
!
! The number of ensemble members used in the experiment. Valid entries are integers > 2
!
da.optimizer.nmembers : 10
! Finally, info on the archive task, if any. Archive tasks are run after each cycle to ensure that the results of each cycle are
! preserved, even if you run on scratch space or a temporary disk. Since an experiment can take multiple weeks to complete, moving
! your results out of the way, or backing them up, is usually a good idea. Note that the tasks are commented and need to be uncommented
! to use this feature.
! The following key identifies that two archive tasks will be executed, one called 'alldata' and the other 'onlyresults'.
!task.rsync : alldata onlyresults
! The specifics for the first task.
! 1> Which source directories to back up. Valid entry is a list of folders separated by spaces
! 2> Which destination directory to use. Valid entries are a folder name, or server and folder name in rsync format as below
! 3> Which flags to add to the rsync command
! The settings below will result in an rsync command that looks like:
!
! rsync -auv -e ssh ${dir.da_run} you@yourserver.com:/yourfolder/
!
!task.rsync.alldata.sourcedirs : ${dir.da_run}
!task.rsync.alldata.destinationdir : you@yourserver.com:/yourfolder/
!task.rsync.alldata.flags : -auv -e ssh
! Repeated for rsync task 2; note that we only back up the analysis and output dirs here
!task.rsync.onlyresults.sourcedirs : ${dir.da_run}/analysis ${dir.da_run}/output
!task.rsync.onlyresults.destinationdir : you@yourserver.com:/yourfolder/
!task.rsync.onlyresults.flags : -auv -e ssh
# CarbonTracker Data Assimilation Shell (CTDAS) Copyright (C) 2017 Wouter Peters.
# Users are recommended to contact the developers (wouter.peters@wur.nl) to receive
# updates of the code. See also: http://www.carbontracker.eu.
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software Foundation,
# version 3. This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this
# program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------------------------------------------
# CO2 concentration simulation based on WRF-STILT/HYSPLIT-NAM12 footprints and
# SiB3/4 biosphere fluxes, CarbonTracker background fluxes and boundary conditions
#
# created by W.He on Feb 20, 2015
#------------------------------------------------------------------------------------------------
# This is a very important message
# It, however, does nothing
source("/Users/wei/co2simu/Rcode/lpdm/rsource/load.ncdf4.r")
source("/Users/wei/co2simu/Rcode/empirical_boundary/src/id2info.r")
source("/Users/wei/co2simu/Rcode/empirical_boundary/src/julian.r")
source("/Users/wei/co2simu/Rcode/rsource/time.r")
source("/Users/wei/co2simu/Rcode/rsource/ddate.r")
source("/Users/wei/co2simu/Rcode/rsource/datelist.r")
source("/Users/wei/co2simu/Rcode/stilt-cvs-20090417/stiltR/sourceall.r")
source("/Users/wei/co2simu/Rcode/stilt-cvs-20090417/stiltR/Trajecfoot.r")
source("/Users/wei/co2simu/Rcode/stilt-cvs-20090417/stiltR/getgridp.r")
source("/Users/wei/co2simu/Rcode/rsource/assignr.r")
source("/Users/wei/co2simu/Rcode/output.ncdf4.r")
# #inputs and outputs
# sibdir="/Users/weihe/Documents/Concensimulation/sib3_fluxes/day1×1/"
# bgfdir="/Users/weihe/Documents/Concensimulation/bgfluxes/3-hourly/"
# footpdir="/Users/weihe/Documents/Concensimulation/stilt_footprints/" #read arlyn's calculated data
# bounddir="/Users/weihe/Documents/Concensimulation/co2_boundary/co2_total/"
# outdir="/Users/weihe/Documents/Concensimulation/resultsforctdas/"
#
# #scaling factors for 10-days time scale
# samdir="/Users/weihe/Documents/example_ctdas/input/"
stiltfile="stilt.rc"
curdir="/Storage/CO2/wei/ctdas-stilt-proto/exec/da/stilt/"
fn=paste(curdir, stiltfile,sep="")
conf=as.matrix(read.table(fn,header=FALSE))
sibdir = conf[1,3]
bgfdir = conf[2,3]
footpdir= conf[3,3]
bounddir= conf[4,3]
outdir = conf[5,3] #output
samdir = conf[6,3] #input
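# A sketch of the stilt.rc layout assumed by the positional reads above:
# one "key : value" entry per line, so column 3 of the parsed table is the
# value field. Key names below are illustrative only:
#   sibdir     : /path/to/sib3_fluxes/
#   bgfdir     : /path/to/bgfluxes/
#   footpdir   : /path/to/stilt_footprints/
#   bounddir   : /path/to/co2_boundary/
#   outdir     : /path/to/output/
#   samdir     : /path/to/input/
#   nsam       : 150
#   ndayscycle : 10
#   startdate  : 2010-01-19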
#----------------------------------------------------------------------------------------------
#Convolve footprints with SiB hourly fluxes, for observations from both aircraft and towers
#----------------------------------------------------------------------------------------------
endtime=240
foottimes=seq(0,endtime,1) #vector of times (backtimes) in hours between which footprint is computed
zbot=0 #lower vertical bound for influence projection, in meters agl
ztop=0 #upper vertical bound for influence projection, in meters agl
#if ztop set to zero, *surface* influence will be calculated
#set up an equivalent domain among footprints,fluxes, but not CO2 boundary
ncol2=66 #number of pixels in x direction in grid
nrow2=40 #number of pixels in y direction in grid
LLLon2=(-129) #lower left corner of grid
LLLat2=22 #lower left corner of grid
ResLon2=1 #resolution in degrees longitude
ResLat2=1 #resolution in degrees latitude
# constants for level-height transform
levelhgt=c(34.5,111.9,256.9,490.4,826.4,1274.1,1839.0,2524.0,3329.9,4255.6,5298.5,6453.8,7715.4,9076.6,
           10533.3,12108.3,13874.2,15860.1,18093.2,20590.0,24247.3,29859.6,35695.0,42551.5,80000.0)
#data from http://www.esrl.noaa.gov/gmd/ccgg/carbontracker-ch4/documentation_tm5.html
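# A minimal helper sketch (assumption: levelhgt holds the top height of each
# model level, so a particle falls in the first level whose top exceeds its
# height above ground):
hgt2level <- function(hgt) findInterval(hgt, levelhgt) + 1
# e.g. hgt2level(500) returns 5, the level topped at 826.4 m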
TBegin <- proc.time()
library(ncdf4)
#--------------------------------------------------------------------------------------------------------
#library(futile.logger)
#flog.appender(appender.file("/Users/weihe/Documents/Concensimulation/resultstest/stilt.log"), name='logger.b')
#flog.info("This writes to a logging %s", "file", name='logger.b')
#flog.warn("This statement has higher severity", name='logger.b')
#flog.fatal("This one is really scary", name='logger.b')
#--------------------------------------------------------------------------------------------------------
# site level: read scaling factors for all sites once
# member level: read boundary for all ensemble members once
#nsam= 150
#ndayscycle=10 # one cycle of footprints
#year=2010
#month=1
#day=19
nsam=as.numeric(conf[7,3]) # read from the stilt.rc file; TODO: replace positional indexing with key-based lookup, validate the parameters, and report status
ndayscycle=as.numeric(conf[8,3])
startdate=conf[9,3]
year=as.numeric(substring(startdate,1,4))
month=as.numeric(substring(startdate,6,7))
day=as.numeric(substring(startdate,9,10))
#if(ndayscycle!=10)
# flog.warn("Sorry, only the case ndayscycle equals 10 is supported", name='logger.b')
b2 = ISOdatetime(year,month,day,0,0,0,tz="UTC")
b1 = b2 - ndayscycle*24*3600 # for footprint
b3 = b1 + 2*ndayscycle*24*3600 # for flux
tb1 = paste(substring(b1,1,4),substring(b1,6,7),substring(b1,9,10),sep="")
tb2 = paste(substring(b2,1,4),substring(b2,6,7),substring(b2,9,10),sep="")
tb3 = paste(substring(b3,1,4),substring(b3,6,7),substring(b3,9,10),sep="")
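# e.g. with the commented defaults above (startdate 2010-01-19, ndayscycle 10):
#   b1 = 2010-01-09, b2 = 2010-01-19, b3 = 2010-01-29
#   tb1 = "20100109", tb2 = "20100119", tb3 = "20100129"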
scalefacarr1=array(NA, dim=c(ncol2,nrow2,nsam))
scalefacarr2=array(NA, dim=c(ncol2,nrow2,nsam))
# TODO: have CTDAS generate the domain for North America; read only the needed subset of the data?
#flog.info("Reading scaling factor files", name='logger.b')
for(i in 0:(nsam-1)) #parameters.000.2010010100_2010011100.nc
{
if (i<10)
ii = paste("00",i,sep="")
if (i<100 && i>=10)
ii = paste("0",i,sep="")
if (i>=100)
ii = i
ncf <- nc_open(paste(samdir,"parameters.",ii,".",tb1,"00","_",tb2,"00",".nc",sep=""))
scalefac <- ncvar_get(ncf,"parametermap",start=c(52,113),count=c(ncol2,nrow2)) #temporary 42; real domain 52:117,113:152, i.e. start=c(52,113),count=c(66,40)
scalefacarr1[,,i+1]=scalefac
ncf <- nc_open(paste(samdir,"parameters.",ii,".",tb2,"00","_",tb3,"00",".nc",sep=""))
scalefac <- ncvar_get(ncf,"parametermap",start=c(52,113),count=c(ncol2,nrow2))
scalefacarr2[,,i+1]=scalefac
}
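# Note: the zero-padding above can be written in one line with identical
# results: ii <- formatC(i, width=3, flag="0")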
#---------------------------------------------------------------------------------
# Center on the state vector, in particular the cycle that is ready to be
# optimized, and look for data from the corresponding period for the optimization
#---------------------------------------------------------------------------------
# according to b2, b3, decide which months
m1=substring(b2,6,7)
m2=substring(b3,6,7)
umon=unique(c(m1,m2))
#flog.info("Determing ready-use footprint files", name='logger.b')
fns=NULL
for(mm in 1:length(umon))
{
pfbpath=paste(footpdir,year,"/",umon[mm],"/",sep="") #"stilt2010x01x01x18x59x33.4057Nx081.8334Wx00285.nc"
tmp=list.files(pfbpath,pattern="nc", all=T)
fns=c(fns,tmp)
}
# read needed event ids from sample_coordinates.***.nc files (e.g. sample_coordinates_2010011100_2010012100.nc)
ncf <- nc_open(paste(samdir,"sample_coordinates_",tb2,"00","_",tb3,"00",".nc",sep=""))
eventidarr <- ncvar_get(ncf,"obs_num")
# filter before loop
pfbfns=NULL
fn=NULL
for(mm in 1:length(fns))
{
#select: Uniform time to make it start from the same point
#yr=substring(fns[mm],6,9) #"2010-01-09 09:00:00 UTC"
#mo=substring(fns[mm],11,12)
#dy=substring(fns[mm],14,15)
#bISO = ISOdate(yr,mo,dy)
#diff1=as.numeric(bISO - b2)
#diff2=as.numeric(bISO - b3)
#if (diff1 >= 0 && diff2 <= 0) # in the time span
# pfbfns = c(pfbfns,fns[mm])
eid=as.numeric(substring(fns[mm],48,53))
for(i in 1:length(eventidarr))
{
if(eid==eventidarr[i])
{
pfbfns = c(pfbfns,fns[mm])
break
}
}
}
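# The nested loop above is a membership test; an equivalent vectorized
# filter would be:
#   eids <- as.numeric(substring(fns, 48, 53))
#   pfbfns <- fns[eids %in% eventidarr]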
#log=paste("For state vectors from ",b1,"to",b2,",",length(pfbfns),"observations have been found")
#flog.info(log, name='logger.b')
# for fluxes, the current 10 days using scaling factors
#flog.info("Start convolving for all footprint files", name='logger.b')
newfile=T #output results into a new file
for(mm in 1:length(pfbfns))
{
# Read pfb file
#mm=1
mo=substring(pfbfns[mm],11,12)
fn=paste(footpdir,year,"/",mo,"/",pfbfns[mm],sep="")
footp=load.ncdf(fn)
footnc <- nc_open(fn)
foot <- ncvar_get(footnc,"foot1",start=c(41,12,1),count=c(ncol2,nrow2,-1))
# Read footprint directly
#ft=footp$foot1 #120*70*240
#foot=array(NA, dim=c(40,66,240))
#for (i in 1:240)
# foot[,,i]=t(as.matrix(ft[41:106,12:51,i])) #113:152
# get info from path strings
ident=substring(pfbfns[mm],6,46)
eventid=substring(pfbfns[mm],48,53)
print(ident)
# flog.info("Convolving for %s",ident, name='logger.b')
info=id2info(ident) #interpret info from file names
#--------------------------------------------------------
# STEP 1: Creates footprint for individual particle runs
#--------------------------------------------------------
#foot1=Trajecfoot(ident=ident,part=part, pathname="",foottimes=foottimes,zlim=c(zbot,ztop),fluxweighting=NULL,coarse=1,vegpath=vegpath,
# numpix.x=ncol1,numpix.y=nrow1,lon.ll=LLLon1,lat.ll=LLLat1,lon.res=ResLon1,lat.res=ResLat1)
# Because the biosphere fluxes and the background fluxes have different spatial resolutions, two footprints are planned to be used; at the end we pick up the concentration values.
#foot2=Trajecfoot(ident=ident,part=part, pathname="",foottimes=foottimes,zlim=c(zbot,ztop),fluxweighting=NULL,coarse=1,vegpath=vegpath,
# numpix.x=ncol2,numpix.y=nrow2,lon.ll=LLLon2,lat.ll=LLLat2,lon.res=ResLon2,lat.res=ResLat2)
if(length(foot)>100)
{
inityr=as.numeric(substring(ident,1,4))
initmo=as.numeric(substring(ident,6,7))
initdy=as.numeric(substring(ident,9,10))
inithr=as.numeric(substring(ident,12,13))
initmi=as.numeric(substring(ident,15,16)) #by He
inihgt=as.numeric(substring(ident,39,41))
# get the time stamp for each foot step, going back in time by the hours given in "endtime"
xx=ISOdatetime(inityr,initmo,initdy,inithr,initmi,0,tz="UTC")
yy=xx+(-foottimes*3600)
cyy=as.character(yy)
yrlist=substring(cyy,1,4)
monlist=substring(cyy,6,7)
daylist=substring(cyy,9,10)
hrlist=substring(cyy,12,13)
milist=substring(cyy,15,16)
# get unique months and days
daystring=paste(yrlist,monlist,daylist, sep="")
udaystring=unique(daystring)
yrmonstring=paste(yrlist,monlist, sep="")
uyrmonstring=unique(yrmonstring)
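# Equivalent, more direct construction of the unique day and month strings:
#   udaystring   <- unique(format(yy, "%Y%m%d"))
#   uyrmonstring <- unique(format(yy, "%Y%m"))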
#kk=length(uyrmonstring) #needs July (former month)
#current month
sibyr=substring(uyrmonstring[1],1,4)
sibmon=substring(uyrmonstring[1],5,6)
#----------------------------------------------------------------------------------
# STEP 2: Read boundary conditions & use endpoint tracing to
# 1) get the corresponding fluxes & convolve them with footprints
# 2) get the "actual" concentrations
#----------------------------------------------------------------------------------
# get endpoints
endpts=footp$endpts
endptna=footp$endptsnames
#endpts=ncvar_get(footnc,"endpts")
#endptna=ncvar_get(footnc,"endptsnames")
colnames(endpts)=endptna #=c("time","index","lat","lon","agl","grdht","temp","pres")
endptsdate=footp$endptsdate #2010-08-04 20:18:00 UTC
#endptsdate=ncvar_get(footnc,"endptsdate")
latarr=endpts[,3]
lonarr=endpts[,4]
hgtarr=endpts[,5] + endpts[,6] #agl+grdht
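# (hypothetical usage) the hgt2level() sketch above would map these endpoint
# heights onto the model levels of the boundary field:
#   lvls <- hgt2level(hgtarr)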
# analyze the dates to determine how much data to read
dd=as.character(endptsdate)
yrlist=substring(dd,1,4)
monlist=substring(dd,6,7)
daylist=substring(dd,9,10)
hrlist=substring(dd,12,13)
milist=substring(dd,15,16)
# get unique months and days
daystring=paste(yrlist,monlist,daylist, sep="")
udaystring=unique(daystring)
yrmonstring=paste(yrlist,monlist, sep="")
uyrmonstring=unique(yrmonstring)
#--------------------------------------------------------------
# 2-1: Read fluxes and boundary data
#--------------------------------------------------------------
ndays=length(udaystring)
bouarr=array(0,dim=c(ndays,120,90,25,8)) #boundary : lon,lat,hgt,time
#biospheric fluxes
nrow_flux1=181
ncol_flux1=361 #288
gpp=array(NA, dim=c(ndays,ncol_flux1,nrow_flux1,24))
rec=array(NA, dim=c(ndays,ncol_flux1,nrow_flux1,24))
pla=array(NA, dim=c(ndays,ncol_flux1,nrow_flux1,24))
soi=array(NA, dim=c(ndays,ncol_flux1,nrow_flux1,24))
#other fluxes #(360,180,8)
nrow_flux2=180
ncol_flux2=360
ocn=array(NA, dim=c(ndays,ncol_flux2,nrow_flux2,8))
fos=array(NA, dim=c(ndays,ncol_flux2,nrow_flux2,8))
fir=array(NA, dim=c(ndays,ncol_flux2,nrow_flux2,8))
ntimes1=ndays*24
ntimes2=ndays*8
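# e.g. ndays=11 backtime days give ntimes1 = 11*24 = 264 hourly biosphere
# slots and ntimes2 = 11*8 = 88 three-hourly background slots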
for(d in 1:ndays)
{
datestr=udaystring[d]
yr=substr(datestr,1,4)
mn=substr(datestr,5,6)
dy=substr(datestr,7,8)
#bou=load.ncdf(paste(bounddir,"CT2013B.molefrac_nam1x1_",yr,"-",mn,"-",dy,".nc",sep=""))
bou=load.ncdf(paste(bounddir,"CT2013B.molefrac_glb3x2_",yr,"-",mn,"-",dy,".nc",sep=""))
# co2(date, level, lat, lon) / ocn_flux_opt(date, lat, lon)
biof=load.ncdf(paste(sibdir,"SiB3.hourly.flux1x1.global.",yr,mn,dy,".nc",sep=""))
bgf=load.ncdf(paste(bgfdir,"CT2013B.flux1x1.",yr,mn,dy,".nc",sep=""))
bouarr[d,,,,]=bou$co2 # boundary CO2
gpp[d,,,]=biof$gpp
rec[d,,,]=biof$rtotal
pla[d,,,]=biof$ocs.gpp
soi[d,,,]=biof$ocs.soil
ocn[d,,,]=bgf$ocn.flux.opt # here we can read less, so change the sizes of ocnarr
fos[d,,,]=bgf$fossil.flux.imp
fir[d,,,]=bgf$fire.flux.imp
remove(list=c("bou","biof","bgf"))
}
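# e.g. for datestr "20100109" the three files opened above are:
#   CT2013B.molefrac_glb3x2_2010-01-09.nc  (boundary CO2)
#   SiB3.hourly.flux1x1.global.20100109.nc (biosphere fluxes)
#   CT2013B.flux1x1.20100109.nc            (background fluxes)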
#--------------------------------------------------------------
# 2-2: prepare data for calculations
#--------------------------------------------------------------
dateall1=rep(ISOdatetime(0,0,0,0,0,0,tz="UTC"), ntimes1)
dateall2=rep(ISOdatetime(0,0,0,0,0,0,tz="UTC"), ntimes2)
gppflux=array(NA, dim=c(ncol2,nrow2,ntimes1))
recflux=array(NA, dim=c(ncol2,nrow2,ntimes1))