From 7e83a098d70248f8fd14f58748838711e6d3c44d Mon Sep 17 00:00:00 2001
From: brunner <amvdw95@gmail.com>
Date: Thu, 8 Nov 2018 16:03:49 +0000
Subject: [PATCH]

---
 da/analysis/expand_molefractions.py         |  12 +-
 da/baseclasses/dasystem.py                  |   8 +-
 da/baseclasses/observationoperator.py       |  54 ++++-----
 da/baseclasses/optimizer.py                 |   4 +-
 da/baseclasses/platform.py                  |   4 +-
 da/baseclasses/statevector.py               |   6 +-
 da/carbondioxide/dasystem.py                |   4 +-
 da/carbondioxide/obs.py                     |  10 +-
 da/carbondioxide/obspack.py                 |  16 +--
 da/carbondioxide/obspack_geocarbon.py       |  24 ++--
 da/carbondioxide/obspack_globalviewplus.py  |  24 ++--
 da/carbondioxide/obspack_globalviewplus2.py |  60 +++++-----
 da/carbondioxide/statevector.py             |   4 +-
 da/co2gridded/dasystem.py                   |   4 +-
 da/co2gridded/statevector.py                |   2 +-
 da/co2gridded/statevectorNHgridded.py       |   2 +-
 da/methane/dasystem.py                      |   2 +-
 da/methane/initexit.py                      |   6 +-
 da/methane/obs.py                           |   8 +-
 da/methane/pipeline.py                      |  10 +-
 da/platform/capegrim.py                     |   4 +-
 da/platform/cartesius.py                    |   4 +-
 da/platform/fmi.py                          |   4 +-
 da/platform/huygens.py                      |   4 +-
 da/platform/jet.py                          |   4 +-
 da/platform/kermadec.py                     |   4 +-
 da/platform/maunaloa.py                     |   4 +-
 da/sf6/pipeline.py                          |   8 +-
 da/stilt/expand_molefractions.py            |  12 +-
 da/stilt/obspack.py                         |  16 +--
 da/stilt/pipeline.py                        |  10 +-
 da/stilt/statevector.py                     |   2 +-
 da/tm5/methaneobservationoperator.py        |   2 +-
 da/tm5/observationoperator.py               |   8 +-
 da/tools/general.py                         |  22 ++--
 da/tools/initexit.py                        |  42 +++----
 da/tools/io4.py                             | 118 ++++++++++----------
 da/tools/pipeline.py                        |  10 +-
 da/tools/rc.py                              |  36 +++---
 da/tools/rc_old.py                          |   4 +-
 template.rc                                 |   6 +-
 41 files changed, 294 insertions(+), 294 deletions(-)

diff --git a/da/analysis/expand_molefractions.py b/da/analysis/expand_molefractions.py
index 4d614d04..abcc2b08 100755
--- a/da/analysis/expand_molefractions.py
+++ b/da/analysis/expand_molefractions.py
@@ -110,7 +110,7 @@ def write_mole_fractions(dacycle):
         fc_simulated = ncf_fc_in.get_variable('modelsamplesmean_prior')
         fc_simulated_ens = ncf_fc_in.get_variable('modelsamplesdeviations_prior')
         fc_flag      = ncf_fc_in.get_variable('flag')
-        if not dacycle.dasystem.has_key('opt.algorithm'):
+        if 'opt.algorithm' not in dacycle.dasystem:
             fc_r         = ncf_fc_in.get_variable('modeldatamismatchvariance')
             fc_hphtr     = ncf_fc_in.get_variable('totalmolefractionvariance')
         elif dacycle.dasystem['opt.algorithm'] == 'serial':
@@ -172,10 +172,10 @@ def write_mole_fractions(dacycle):
 
             # get nobs dimension
 
-            if ncf_out.dimensions.has_key('id'): 
+            if 'id' in ncf_out.dimensions:
                 dimidob = ncf_out.dimensions['id']
                 dimid = ('id',)
-            elif ncf_out.dimensions.has_key('obs'): 
+            elif 'obs' in ncf_out.dimensions:
                 dimidob = ncf_out.dimensions['obs']
                 dimid = ('obs',)
 
@@ -273,11 +273,11 @@ def write_mole_fractions(dacycle):
 
         # Get existing file obs_nums to determine match to local obs_nums
 
-        if ncf_out.variables.has_key('merge_num'):
+        if 'merge_num' in ncf_out.variables:
             file_obs_nums = ncf_out.get_variable('merge_num')
-        elif ncf_out.variables.has_key('obspack_num'):
+        elif 'obspack_num' in ncf_out.variables:
             file_obs_nums = ncf_out.get_variable('obspack_num')
-        elif ncf_out.variables.has_key('id'):
+        elif 'id' in ncf_out.variables:
             file_obs_nums = ncf_out.get_variable('id')
 
         # Get all obs_nums related to this file, determine their indices in the local arrays
diff --git a/da/baseclasses/dasystem.py b/da/baseclasses/dasystem.py
index 250ff546..6439eaa3 100755
--- a/da/baseclasses/dasystem.py
+++ b/da/baseclasses/dasystem.py
@@ -70,7 +70,7 @@ class DaSystem(dict):
         """ 
         This method loads a DA System Info rc-file with settings for this simulation 
         """
-        for k, v in rc.read(rcfilename).iteritems():
+        for k, v in rc.read(rcfilename).items():
             self[k] = v
         
         logging.debug("DA System Info rc-file (%s) loaded successfully" % rcfilename)
@@ -82,17 +82,17 @@ class DaSystem(dict):
         """
         needed_rc_items = {}
 
-        for k, v in self.iteritems():
+        for k, v in self.items():
             if v == 'True' : 
                 self[k] = True
             if v == 'False': 
                 self[k] = False
 
         for key in needed_rc_items:
-            if not self.has_key(key):
+            if key not in self:
                 msg = 'Missing a required value in rc-file : %s' % key
                 logging.error(msg)
-                raise IOError, msg
+                raise IOError(msg)
         logging.debug('DA System Info settings have been validated succesfully')
 
 ################### End Class DaSystem ###################
diff --git a/da/baseclasses/observationoperator.py b/da/baseclasses/observationoperator.py
index 4b61b2ce..54a2f349 100755
--- a/da/baseclasses/observationoperator.py
+++ b/da/baseclasses/observationoperator.py
@@ -66,17 +66,17 @@ class ObservationOperator(object):
         """ Perform all steps necessary to start the observation operator through a simple Run() call """
 
         self.dacycle = dacycle
-	self.outputdir = dacycle['dir.output']
+        self.outputdir = dacycle['dir.output']
 
     def prepare_run(self):
         """ Prepare the running of the actual forecast model, for example compile code """
 
-	import os
+        import os
 
-	# Define the name of the file that will contain the modeled output of each observation
+        # Define the name of the file that will contain the modeled output of each observation
 
-    	self.simulated_file = os.path.join(self.outputdir, 'samples_simulated.%s.nc' % self.dacycle['time.sample.stamp'])
-    	self.forecast_nmembers = int(self.dacycle['da.optimizer.nmembers'])
+        self.simulated_file = os.path.join(self.outputdir, 'samples_simulated.%s.nc' % self.dacycle['time.sample.stamp'])
+        self.forecast_nmembers = int(self.dacycle['da.optimizer.nmembers'])
 
     def validate_input(self):
         """ Make sure that data needed for the ObservationOperator (such as observation input lists, or parameter files)
@@ -86,21 +86,21 @@ class ObservationOperator(object):
         """ Write the data that is needed for a restart or recovery of the Observation Operator to the save directory """
 
     def run(self):
-	"""
-	 This Randomizer will take the original observation data in the Obs object, and simply copy each mean value. Next, the mean 
-	 value will be perturbed by a random normal number drawn from a specified uncertainty of +/- 2 ppm
-	"""
+        """
+         This Randomizer will take the original observation data in the Obs object, and simply copy each mean value. Next, the mean 
+         value will be perturbed by a random normal number drawn from a specified uncertainty of +/- 2 ppm
+        """
 
-	import da.tools.io4 as io
-	import numpy as np
+        import da.tools.io4 as io
+        import numpy as np
 
-	# Create a flask output file in TM5-style (to be updated later?) to hold simulated values for later reading
+        # Create a flask output file in TM5-style (to be updated later?) to hold simulated values for later reading
 
-    	f = io.CT_CDF(self.simulated_file, method='create')
-    	logging.debug('Creating new simulated observation file in ObservationOperator (%s)' % self.simulated_file)
-	
+        f = io.CT_CDF(self.simulated_file, method='create')
+        logging.debug('Creating new simulated observation file in ObservationOperator (%s)' % self.simulated_file)
+        
         dimid = f.createDimension('obs_num', size=None)
-	dimid = ('obs_num',)
+        dimid = ('obs_num',)
         savedict = io.std_savedict.copy() 
         savedict['name'] = "obs_num"
         savedict['dtype'] = "int"
@@ -111,7 +111,7 @@ class ObservationOperator(object):
         f.add_data(savedict,nsets=0)
 
         dimmember = f.createDimension('nmembers', size=self.forecast_nmembers)
-	dimmember = ('nmembers',)
+        dimmember = ('nmembers',)
         savedict = io.std_savedict.copy() 
         savedict['name'] = "flask"
         savedict['dtype'] = "float"
@@ -121,28 +121,28 @@ class ObservationOperator(object):
         savedict['comment'] = "Simulated model value created by RandomizerObservationOperator"
         f.add_data(savedict,nsets=0)
 
-	# Open file with x,y,z,t of model samples that need to be sampled
+        # Open file with x,y,z,t of model samples that need to be sampled
 
         f_in = io.ct_read(self.dacycle['ObsOperator.inputfile'],method='read') 
 
-	# Get simulated values and ID
+        # Get simulated values and ID
 
         ids = f_in.get_variable('obs_num')
         obs = f_in.get_variable('observed')
         mdm = f_in.get_variable('modeldatamismatch')
 
-	# Loop over observations, add random white noise, and write to file
+        # Loop over observations, add random white noise, and write to file
 
-	for i,data in enumerate(zip(ids,obs,mdm)):
-	    f.variables['obs_num'][i] = data[0]		
-	    f.variables['flask'][i,:] = data[1]+np.random.randn(self.forecast_nmembers)*data[2]
+        for i,data in enumerate(zip(ids,obs,mdm)):
+            f.variables['obs_num'][i] = data[0]
+            f.variables['flask'][i,:] = data[1]+np.random.randn(self.forecast_nmembers)*data[2]
 
-	f.close()
-	f_in.close()
+        f.close()
+        f_in.close()
 
-	# Report success and exit
+        # Report success and exit
 
-    	logging.info('ObservationOperator finished successfully, output file written (%s)' % self.simulated_file)
+        logging.info('ObservationOperator finished successfully, output file written (%s)' % self.simulated_file)
 
     def run_forecast_model(self):
         self.prepare_run()
diff --git a/da/baseclasses/optimizer.py b/da/baseclasses/optimizer.py
index e05b9fb0..82e67089 100755
--- a/da/baseclasses/optimizer.py
+++ b/da/baseclasses/optimizer.py
@@ -124,8 +124,8 @@ class Optimizer(object):
                 allids.extend(samples.getvalues('id'))
 
                 simulatedensemble = samples.getvalues('simulated')
-		for s in range(simulatedensemble.shape[0]):
-                	allsimulated.append(simulatedensemble[s])
+                for s in range(simulatedensemble.shape[0]):
+                        allsimulated.append(simulatedensemble[s])
 
         self.obs[:] = np.array(allobs)
         self.obs_ids[:] = np.array(allids)
diff --git a/da/baseclasses/platform.py b/da/baseclasses/platform.py
index fbdf0cb3..6243f3a8 100755
--- a/da/baseclasses/platform.py
+++ b/da/baseclasses/platform.py
@@ -95,11 +95,11 @@ class Platform(object):
             template += """#$ -hold_jid depends \n"""
 
         # First replace from passed dictionary
-        for k, v in joboptions.iteritems():
+        for k, v in joboptions.items():
             while k in template:
                 template = template.replace(k, v)
         # Fill remaining values with std_options
-        for k, v in std_joboptions.iteritems():
+        for k, v in std_joboptions.items():
             while k in template:
                 template = template.replace(k, v)
         return template
diff --git a/da/baseclasses/statevector.py b/da/baseclasses/statevector.py
index cf5ce63d..94cb6e97 100755
--- a/da/baseclasses/statevector.py
+++ b/da/baseclasses/statevector.py
@@ -156,7 +156,7 @@ class StateVector(object):
         # These list objects hold the data for each time step of lag in the system. Note that the ensembles for each time step consist 
         # of lists of EnsembleMember objects, we define member 0 as the mean of the distribution and n=1,...,nmembers as the spread.
 
-        self.ensemble_members = range(self.nlag)
+        self.ensemble_members = list(range(self.nlag))
 
         for n in range(self.nlag):
             self.ensemble_members[n] = []
@@ -500,7 +500,7 @@ class StateVector(object):
             raise ValueError
 
         result = np.zeros((self.nparams,), float)
-        for k, v in self.griddict.iteritems():
+        for k, v in self.griddict.items():
             #print k,k-1,result.shape, v
             if method == "avg": 
                 result[k - 1] = griddata.take(v).mean()
@@ -528,7 +528,7 @@ class StateVector(object):
 
         """
         result = np.zeros(self.gridmap.shape, float)
-        for k, v in self.griddict.iteritems():
+        for k, v in self.griddict.items():
             #print k,v
             result.put(v, vectordata[k - 1])
         return result         
diff --git a/da/carbondioxide/dasystem.py b/da/carbondioxide/dasystem.py
index 47d78a29..35204b61 100755
--- a/da/carbondioxide/dasystem.py
+++ b/da/carbondioxide/dasystem.py
@@ -46,14 +46,14 @@ class CO2DaSystem(DaSystem):
                            'regtype']
 
 
-        for k, v in self.iteritems():
+        for k, v in self.items():
             if v == 'True' : 
                 self[k] = True
             if v == 'False': 
                 self[k] = False
 
         for key in needed_rc_items:
-            if not self.has_key(key):
+            if key not in self:
                 logging.warning('Missing a required value in rc-file : %s' % key)
         logging.debug('DA System Info settings have been validated succesfully')
 
diff --git a/da/carbondioxide/obs.py b/da/carbondioxide/obs.py
index 2b942f12..775feb95 100755
--- a/da/carbondioxide/obs.py
+++ b/da/carbondioxide/obs.py
@@ -56,7 +56,7 @@ class CO2Observations(Observations):
         if not os.path.exists(filename):
             msg = 'Could not find  the required observation input file (%s) ' % filename
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.obs_filename = filename
         self.datalist = []
@@ -118,7 +118,7 @@ class CO2Observations(Observations):
             logging.error(msg)
             logging.error("Did the sampling step succeed?")
             logging.error("...exiting")
-            raise IOError, msg
+            raise IOError(msg)
 
         ncf = io.ct_read(filename, method='read')
         ids = ncf.get_variable('obs_num')
@@ -129,7 +129,7 @@ class CO2Observations(Observations):
         obs_ids = self.getvalues('id')
 
         obs_ids = obs_ids.tolist()
-        ids = map(int, ids)
+        ids = list(map(int, ids))
 
         missing_samples = []
 
@@ -265,7 +265,7 @@ class CO2Observations(Observations):
         if not os.path.exists(filename):
             msg = 'Could not find  the required sites.rc input file (%s)' % filename
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.sites_file = filename
 
@@ -308,7 +308,7 @@ class CO2Observations(Observations):
 
         for obs in self.datalist:
             obs.mdm = 1000.0  # default is very high model-data-mismatch, until explicitly set by script
-            if site_info.has_key(obs.code): 
+            if obs.code in site_info:
                 logging.debug("Observation found (%s)" % obs.code)
                 obs.mdm = site_info[obs.code]['error'] * self.global_R_scaling
                 obs.may_localize = site_info[obs.code]['may_localize']
diff --git a/da/carbondioxide/obspack.py b/da/carbondioxide/obspack.py
index 1a2c44ce..ceab0138 100755
--- a/da/carbondioxide/obspack.py
+++ b/da/carbondioxide/obspack.py
@@ -24,7 +24,7 @@ import os
 import sys
 import logging
 import datetime as dtm
-from string import strip
+#from string import strip
 from numpy import array, logical_and
 
 sys.path.append(os.getcwd())
@@ -55,7 +55,7 @@ class ObsPackObservations(Observations):
         if not os.path.exists(op_dir):
             msg = 'Could not find  the required ObsPack distribution (%s) ' % op_dir
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.obspack_dir = op_dir
             self.obspack_id = op_id
@@ -102,7 +102,7 @@ class ObsPackObservations(Observations):
             ids = ncf.get_variable('obspack_num').take(subselect)  # or should we propagate obs_num which is not unique across datasets??
             evn = ncf.get_variable('obspack_id').take(subselect, axis=0)
             evn = [s.tostring().lower() for s in evn]
-            evn = map(strip, evn)
+            evn = [s.strip() for s in evn]
             site = ncf.get_attribute('site_code')
             lats = ncf.get_variable('latitude').take(subselect, axis=0)
             lons = ncf.get_variable('longitude').take(subselect, axis=0)
@@ -113,7 +113,7 @@ class ObsPackObservations(Observations):
             strategy = 1
             flags = ncf.get_variable('qc_flag').take(subselect, axis=0)
             flags = [s.tostring().lower() for s in flags]
-            flags = map(strip, flags)
+            flags = [s.strip() for s in flags]
             flags = [int(f == '...') for f in flags]
             ncf.close()
 
@@ -132,7 +132,7 @@ class ObsPackObservations(Observations):
             logging.error(msg)
             logging.error("Did the sampling step succeed?")
             logging.error("...exiting")
-            raise IOError, msg
+            raise IOError(msg)
 
         ncf = io.ct_read(filename, method='read')
         ids = ncf.get_variable('obs_num')
@@ -141,7 +141,7 @@ class ObsPackObservations(Observations):
         logging.info("Successfully read data from model sample file (%s)" % filename)
 
         obs_ids = self.getvalues('id').tolist()
-        ids = map(int, ids)
+        ids = list(map(int, ids))
 
         missing_samples = []
 
@@ -296,7 +296,7 @@ class ObsPackObservations(Observations):
         if not os.path.exists(filename):
             msg = 'Could not find  the required sites.rc input file (%s) ' % filename
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.sites_file = filename
 
@@ -348,7 +348,7 @@ class ObsPackObservations(Observations):
 
             identifier = name_convert(name="%s_%s_%s" % (site.lower(), method.lower(), lab.lower(),), to='GV')
 
-            if site_info.has_key(identifier): 
+            if identifier in site_info:
                 logging.debug("Observation found (%s, %s)" % (obs.code, identifier))
                 obs.mdm = site_info[identifier]['error'] * self.global_R_scaling
                 obs.may_localize = site_info[identifier]['may_localize']
diff --git a/da/carbondioxide/obspack_geocarbon.py b/da/carbondioxide/obspack_geocarbon.py
index 5f184dfd..26b54962 100755
--- a/da/carbondioxide/obspack_geocarbon.py
+++ b/da/carbondioxide/obspack_geocarbon.py
@@ -52,7 +52,7 @@ class ObsPackObservations(Observations):
         if not os.path.exists(op_dir):
             msg = 'Could not find  the required ObsPack distribution (%s) ' % op_dir
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.obspack_dir = op_dir
             self.obspack_id = op_id
@@ -101,7 +101,7 @@ class ObsPackObservations(Observations):
             obspacknum = ncf.get_variable('obspack_num').take(subselect)  # or should we propagate obs_num which is not unique across datasets??
             obspackid = ncf.get_variable('obspack_id').take(subselect, axis=0)
             obspackid = [s.tostring().lower() for s in obspackid]
-            obspackid = map(strip, obspackid)
+            obspackid = [s.strip() for s in obspackid]
             datasetname = ncfile  # use full name of dataset to propagate for clarity
             lats = ncf.get_variable('latitude').take(subselect, axis=0)
             lons = ncf.get_variable('longitude').take(subselect, axis=0)
@@ -127,7 +127,7 @@ class ObsPackObservations(Observations):
             logging.error(msg)
             logging.error("Did the sampling step succeed?")
             logging.error("...exiting")
-            raise IOError, msg
+            raise IOError(msg)
 
         ncf = io.ct_read(filename, method='read')
         ids = ncf.get_variable('obs_num')
@@ -136,7 +136,7 @@ class ObsPackObservations(Observations):
         logging.info("Successfully read data from model sample file (%s)" % filename)
 
         obs_ids = self.getvalues('id').tolist()
-        ids = map(int, ids)
+        ids = list(map(int, ids))
 
         missing_samples = []
 
@@ -173,7 +173,7 @@ class ObsPackObservations(Observations):
             dim10char = f.add_dim('string_of10chars', 10)
             dimcalcomp = f.add_dim('calendar_components', 6)
 
-            for key, value in self.site_move.iteritems():
+            for key, value in self.site_move.items():
                 msg = "Site is moved by %3.2f degrees latitude and %3.2f degrees longitude" % value 
                 f.add_attribute(key, msg)
 
@@ -296,7 +296,7 @@ class ObsPackObservations(Observations):
         if not os.path.exists(filename):
             msg = 'Could not find  the required sites.rc input file (%s) ' % filename
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.sites_file = filename
 
@@ -325,7 +325,7 @@ class ObsPackObservations(Observations):
         site_move = {}
         site_hourly = {}   # option added to include only certain hours of the day (for e.g. PAL) IvdL
         site_incalt = {} # option to increase sampling altitude for sites specified in sites and weights file 
-        for key, value in sites_weights.iteritems():
+        for key, value in sites_weights.items():
             if 'co2_' in key or 'sf6' in key:  # to be fixed later, do not yet know how to parse valid keys from rc-files yet.... WP
                 sitename, sitecategory = key, value
                 sitename = sitename.strip()
@@ -350,8 +350,8 @@ class ObsPackObservations(Observations):
             identifier = obs.code
             species, site, method, lab, datasetnr = identifier.split('_')
 
-            if site_info.has_key(identifier):
-                if site_hourly.has_key(identifier):
+            if identifier in site_info:
+                if identifier in site_hourly:
                     obs.samplingstrategy = 2
                     hourf, hourt = site_hourly[identifier]
                     if int(obs.xdate.hour) >= hourf and int(obs.xdate.hour) <= hourt:
@@ -375,7 +375,7 @@ class ObsPackObservations(Observations):
             else:
                 logging.warning("Observation NOT found (%s, %d), please check sites.rc file (%s)  !!!" % (identifier, obs.id, self.sites_file))
 
-            if site_move.has_key(identifier):
+            if identifier in site_move:
 
                 movelat, movelon = site_move[identifier]
                 obs.lat = obs.lat + movelat
@@ -383,7 +383,7 @@ class ObsPackObservations(Observations):
 
                 logging.warning("Observation location for (%s, %d), is moved by %3.2f degrees latitude and %3.2f degrees longitude" % (identifier, obs.id, movelat, movelon))
 
-            if site_incalt.has_key(identifier):
+            if identifier in site_incalt:
 
                 incalt = site_incalt[identifier]
                 obs.height = obs.height + incalt
@@ -418,7 +418,7 @@ class ObsPackObservations(Observations):
             f.close()
             #return outfile
 
-        for key, value in self.site_move.iteritems():
+        for key, value in self.site_move.items():
             msg = "Site is moved by %3.2f degrees latitude and %3.2f degrees longitude" % value 
             f.add_attribute(key, msg)
 
diff --git a/da/carbondioxide/obspack_globalviewplus.py b/da/carbondioxide/obspack_globalviewplus.py
index 037f1652..da1a09a3 100755
--- a/da/carbondioxide/obspack_globalviewplus.py
+++ b/da/carbondioxide/obspack_globalviewplus.py
@@ -25,7 +25,7 @@ import sys
 import logging
         
 import datetime as dtm
-from string import strip
+#from string import strip
 from numpy import array, logical_and, sqrt
 sys.path.append(os.getcwd())
 sys.path.append('../../')
@@ -52,7 +52,7 @@ class ObsPackObservations(Observations):
         if not os.path.exists(op_dir):
             msg = 'Could not find  the required ObsPack distribution (%s) ' % op_dir
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.obspack_dir = op_dir
             self.obspack_id = op_id
@@ -107,7 +107,7 @@ class ObsPackObservations(Observations):
             else:
                 obspackid = ncf.get_variable('obspack_id').take(subselect, axis=0)
             obspackid = [s.tostring().lower() for s in obspackid]
-            obspackid = map(strip, obspackid)
+            obspackid = [s.strip() for s in obspackid]
             datasetname = ncfile  # use full name of dataset to propagate for clarity
             lats = ncf.get_variable('latitude').take(subselect, axis=0)
             lons = ncf.get_variable('longitude').take(subselect, axis=0)
@@ -133,7 +133,7 @@ class ObsPackObservations(Observations):
             logging.error(msg)
             logging.error("Did the sampling step succeed?")
             logging.error("...exiting")
-            raise IOError, msg
+            raise IOError(msg)
 
         ncf = io.ct_read(filename, method='read')
         ids = ncf.get_variable('obs_num')
@@ -142,7 +142,7 @@ class ObsPackObservations(Observations):
         logging.info("Successfully read data from model sample file (%s)" % filename)
 
         obs_ids = self.getvalues('id').tolist()
-        ids = map(int, ids)
+        ids = list(map(int, ids))
 
         missing_samples = []
 
@@ -179,7 +179,7 @@ class ObsPackObservations(Observations):
             dim10char = f.add_dim('string_of10chars', 10)
             dimcalcomp = f.add_dim('calendar_components', 6)
 
-            for key, value in self.site_move.iteritems():
+            for key, value in self.site_move.items():
                 msg = "Site is moved by %3.2f degrees latitude and %3.2f degrees longitude" % value 
                 f.add_attribute(key, msg)
 
@@ -302,7 +302,7 @@ class ObsPackObservations(Observations):
         if not os.path.exists(filename):
             msg = 'Could not find  the required sites.rc input file (%s) ' % filename
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.sites_file = filename
 
@@ -330,7 +330,7 @@ class ObsPackObservations(Observations):
         site_info = {}
         site_move = {}
         site_incalt = {} # option to increase sampling altitude for sites specified in sites and weights file 
-        for key, value in sites_weights.iteritems():
+        for key, value in sites_weights.items():
             if 'co2_' in key or 'sf6' in key:  # to be fixed later, do not yet know how to parse valid keys from rc-files yet.... WP
                 sitename, sitecategory = key, value
                 sitename = sitename.strip()
@@ -357,7 +357,7 @@ class ObsPackObservations(Observations):
             identifier = obs.code
             species, site, method, lab, datasetnr = identifier.split('_')
 
-            if site_info.has_key(identifier):
+            if identifier in site_info:
                 if site_info[identifier]['category'] == 'do-not-use' or obs.flag == 99:
                     logging.warning("Observation found (%s, %d), but not used in assimilation." % (identifier, obs.id))
                     obs.mdm = site_info[identifier]['error'] * self.global_R_scaling
@@ -377,7 +377,7 @@ class ObsPackObservations(Observations):
             else:
                 logging.warning("Observation NOT found (%s, %d), please check sites.rc file (%s)  !!!" % (identifier, obs.id, self.sites_file))
 
-            if site_move.has_key(identifier):
+            if identifier in site_move:
 
                 movelat, movelon = site_move[identifier]
                 obs.lat = obs.lat + movelat
@@ -385,7 +385,7 @@ class ObsPackObservations(Observations):
 
                 logging.warning("Observation location for (%s, %d), is moved by %3.2f degrees latitude and %3.2f degrees longitude" % (identifier, obs.id, movelat, movelon))
 
-            if site_incalt.has_key(identifier):
+            if identifier in site_incalt:
 
                 incalt = site_incalt[identifier]
                 obs.height = obs.height + incalt
@@ -419,7 +419,7 @@ class ObsPackObservations(Observations):
             f.close()
             #return outfile
 
-        for key, value in self.site_move.iteritems():
+        for key, value in self.site_move.items():
             msg = "Site is moved by %3.2f degrees latitude and %3.2f degrees longitude" % value 
             f.add_attribute(key, msg)
 
diff --git a/da/carbondioxide/obspack_globalviewplus2.py b/da/carbondioxide/obspack_globalviewplus2.py
index 220cc965..3a7aec3e 100755
--- a/da/carbondioxide/obspack_globalviewplus2.py
+++ b/da/carbondioxide/obspack_globalviewplus2.py
@@ -25,7 +25,7 @@ import sys
 import logging
         
 import datetime as dtm
-from string import strip
+#from string import strip
 from numpy import array, logical_and, sqrt
 sys.path.append(os.getcwd())
 sys.path.append('../../')
@@ -52,7 +52,7 @@ class ObsPackObservations(Observations):
         if not os.path.exists(op_dir):
             msg = 'Could not find  the required ObsPack distribution (%s) ' % op_dir
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.obspack_dir = op_dir
             self.obspack_id = op_id
@@ -107,7 +107,7 @@ class ObsPackObservations(Observations):
             else:
                 obspackid = ncf.get_variable('obspack_id').take(subselect, axis=0)
             obspackid = [s.tostring().lower() for s in obspackid]
-            obspackid = map(strip, obspackid)
+            obspackid = [s.strip() for s in obspackid]
             datasetname = ncfile  # use full name of dataset to propagate for clarity
             lats = ncf.get_variable('latitude').take(subselect, axis=0)
             lons = ncf.get_variable('longitude').take(subselect, axis=0)
@@ -133,7 +133,7 @@ class ObsPackObservations(Observations):
             logging.error(msg)
             logging.error("Did the sampling step succeed?")
             logging.error("...exiting")
-            raise IOError, msg
+            raise IOError(msg)
 
         ncf = io.ct_read(filename, method='read')
         ids = ncf.get_variable('obs_num')
@@ -142,7 +142,7 @@ class ObsPackObservations(Observations):
         logging.info("Successfully read data from model sample file (%s)" % filename)
 
         obs_ids = self.getvalues('id').tolist()
-        ids = map(int, ids)
+        ids = list(map(int, ids))
 
         missing_samples = []
 
@@ -179,7 +179,7 @@ class ObsPackObservations(Observations):
             dim10char = f.add_dim('string_of10chars', 10)
             dimcalcomp = f.add_dim('calendar_components', 6)
 
-            for key, value in self.site_move.iteritems():
+            for key, value in self.site_move.items():
                 msg = "Site is moved by %3.2f degrees latitude and %3.2f degrees longitude" % value 
                 f.add_attribute(key, msg)
 
@@ -260,27 +260,27 @@ class ObsPackObservations(Observations):
             savedict['missing_value'] = '!'
             f.add_data(savedict)
 
-	    data = self.getvalues('obs')
+            data = self.getvalues('obs')
 
-	    savedict = io.std_savedict.copy()
-	    savedict['name'] = "observed"
-	    savedict['long_name'] = "observedvalues"
-	    savedict['units'] = "mol mol-1"
-	    savedict['dims'] = dimid
-	    savedict['values'] = data.tolist()
-	    savedict['comment'] = 'Observations used in optimization'
-	    f.add_data(savedict)
+            savedict = io.std_savedict.copy()
+            savedict['name'] = "observed"
+            savedict['long_name'] = "observedvalues"
+            savedict['units'] = "mol mol-1"
+            savedict['dims'] = dimid
+            savedict['values'] = data.tolist()
+            savedict['comment'] = 'Observations used in optimization'
+            f.add_data(savedict)
     
-	    data = self.getvalues('mdm')
+            data = self.getvalues('mdm')
     
-	    savedict = io.std_savedict.copy()
-	    savedict['name'] = "modeldatamismatch"
-	    savedict['long_name'] = "modeldatamismatch"
-	    savedict['units'] = "[mol mol-1]"
-	    savedict['dims'] = dimid
-	    savedict['values'] = data.tolist()
-	    savedict['comment'] = 'Standard deviation of mole fractions resulting from model-data mismatch'
-	    f.add_data(savedict)
+            savedict = io.std_savedict.copy()
+            savedict['name'] = "modeldatamismatch"
+            savedict['long_name'] = "modeldatamismatch"
+            savedict['units'] = "[mol mol-1]"
+            savedict['dims'] = dimid
+            savedict['values'] = data.tolist()
+            savedict['comment'] = 'Standard deviation of mole fractions resulting from model-data mismatch'
+            f.add_data(savedict)
             f.close()
 
             logging.debug("Successfully wrote data to obs file")
@@ -302,7 +302,7 @@ class ObsPackObservations(Observations):
         if not os.path.exists(filename):
             msg = 'Could not find  the required sites.rc input file (%s) ' % filename
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.sites_file = filename
 
@@ -330,7 +330,7 @@ class ObsPackObservations(Observations):
         site_info = {}
         site_move = {}
         site_incalt = {} # option to increase sampling altitude for sites specified in sites and weights file 
-        for key, value in sites_weights.iteritems():
+        for key, value in sites_weights.items():
             if 'co2_' in key or 'sf6' in key:  # to be fixed later, do not yet know how to parse valid keys from rc-files yet.... WP
                 sitename, sitecategory = key, value
                 sitename = sitename.strip()
@@ -357,7 +357,7 @@ class ObsPackObservations(Observations):
             identifier = obs.code
             species, site, method, lab, datasetnr = identifier.split('_')
 
-            if site_info.has_key(identifier):
+            if identifier in site_info:
                 if site_info[identifier]['category'] == 'do-not-use' or obs.flag == 99:
                     logging.warning("Observation found (%s, %d), but not used in assimilation." % (identifier, obs.id))
                     obs.mdm = site_info[identifier]['error'] * self.global_R_scaling
@@ -377,7 +377,7 @@ class ObsPackObservations(Observations):
             else:
                 logging.warning("Observation NOT found (%s, %d), please check sites.rc file (%s)  !!!" % (identifier, obs.id, self.sites_file))
 
-            if site_move.has_key(identifier):
+            if identifier in site_move:
 
                 movelat, movelon = site_move[identifier]
                 obs.lat = obs.lat + movelat
@@ -385,7 +385,7 @@ class ObsPackObservations(Observations):
 
                 logging.warning("Observation location for (%s, %d), is moved by %3.2f degrees latitude and %3.2f degrees longitude" % (identifier, obs.id, movelat, movelon))
 
-            if site_incalt.has_key(identifier):
+            if identifier in site_incalt:
 
                 incalt = site_incalt[identifier]
                 obs.height = obs.height + incalt
@@ -419,7 +419,7 @@ class ObsPackObservations(Observations):
             f.close()
             #return outfile
 
-        for key, value in self.site_move.iteritems():
+        for key, value in self.site_move.items():
             msg = "Site is moved by %3.2f degrees latitude and %3.2f degrees longitude" % value 
             f.add_attribute(key, msg)
 
diff --git a/da/carbondioxide/statevector.py b/da/carbondioxide/statevector.py
index a371fd9d..60856e77 100755
--- a/da/carbondioxide/statevector.py
+++ b/da/carbondioxide/statevector.py
@@ -62,7 +62,7 @@ class CO2StateVector(StateVector):
             if not os.path.exists(fil):
                 msg = "Cannot find the specified file %s" % fil
                 logging.error(msg)
-                raise IOError, msg
+                raise IOError(msg)
             else:
                 logging.info("Using covariance file: %s" % fil)
 
@@ -70,7 +70,7 @@ class CO2StateVector(StateVector):
         f_bio = io.ct_read(file_bio_cov, 'read')
 
         cov_ocn = f_ocn.get_variable('CORMAT')
-        if f_bio.variables.has_key('covariance'):
+        if 'covariance' in f_bio.variables:
             cov_bio = f_bio.get_variable('covariance')  # newly created CTDAS covariance files
         else:
             cov_bio = f_bio.get_variable('qprior')  # old CarbonTracker covariance files
diff --git a/da/co2gridded/dasystem.py b/da/co2gridded/dasystem.py
index 8c94d5da..1b9fae9c 100755
--- a/da/co2gridded/dasystem.py
+++ b/da/co2gridded/dasystem.py
@@ -61,10 +61,10 @@ class CO2GriddedDaSystem(DaSystem):
             if v == 'False': self[k] = False
 
         for key in needed_rc_items:
-            if not self.has_key(key):
+            if key not in self:
                 msg = 'Missing a required value in rc-file : %s' % key
                 logging.error(msg)
-                raise IOError, msg
+                raise IOError(msg)
 
         logging.debug('DA System Info settings have been validated succesfully')
 
diff --git a/da/co2gridded/statevector.py b/da/co2gridded/statevector.py
index 2f5917b1..7fa132b4 100755
--- a/da/co2gridded/statevector.py
+++ b/da/co2gridded/statevector.py
@@ -71,7 +71,7 @@ class CO2GriddedStateVector(StateVector):
             if not os.path.exists(file):
                 msg = "Cannot find the specified file %s" % file 
                 logging.error(msg)
-                raise IOError, msg
+                raise IOError(msg)
             else:
                 logging.debug("Using covariance file: %s" % file)
 
diff --git a/da/co2gridded/statevectorNHgridded.py b/da/co2gridded/statevectorNHgridded.py
index 3e621b28..24027f90 100755
--- a/da/co2gridded/statevectorNHgridded.py
+++ b/da/co2gridded/statevectorNHgridded.py
@@ -81,7 +81,7 @@ class CO2GriddedStateVector(StateVector):
             if not os.path.exists(file):
                 msg = "Cannot find the specified file %s" % file 
                 logging.error(msg)
-                raise IOError, msg
+                raise IOError(msg)
             else:
                 logging.debug("Using covariance file: %s" % file)
 
diff --git a/da/methane/dasystem.py b/da/methane/dasystem.py
index d54bb57a..1ec022eb 100755
--- a/da/methane/dasystem.py
+++ b/da/methane/dasystem.py
@@ -46,7 +46,7 @@ class MethaneDaSystem(DaSystem):
                 self[k] = False
 
         for key in needed_rc_items:
-            if not self.has_key(key):
+            if key not in self:
                 logging.warning('Missing a required value in rc-file : %s' % key)
         logging.debug('DA System Info settings have been validated succesfully')
 
diff --git a/da/methane/initexit.py b/da/methane/initexit.py
index 475c20a0..9079cbff 100755
--- a/da/methane/initexit.py
+++ b/da/methane/initexit.py
@@ -161,11 +161,11 @@ def setup_fmi(self):
 
         strippedname = os.path.split(self['jobrcfilename'])[-1]
         self['jobrcfilename'] = os.path.join(self['dir.exec'], strippedname)
-        if self.dasystem.has_key('copyregionsfile'):
+        if 'copyregionsfile' in self.dasystem:
           shutil.copy(os.path.join(self.dasystem['regionsfile']),os.path.join(self['dir.exec'],'da','methane','analysis','copied_regions.nc'))
           logging.info('Copied regions file to the analysis directory: %s'%os.path.join(self.dasystem['regionsfile'])) 
 
-          if self.dasystem.has_key('extendedregionsfile'):
+          if 'extendedregionsfile' in self.dasystem:
               shutil.copy(os.path.join(self.dasystem['extendedregionsfile']),os.path.join(self['dir.exec'],'da','analysis','copied_regions_extended.nc')) 
               logging.info('Copied extended regions file to the analysis directory: %s'%os.path.join(self.dasystem['extendedregionsfile'])) 
 
@@ -177,7 +177,7 @@ def setup_fmi(self):
             logging.info('Deleting pickle file %s to make sure the correct regions are used'%os.path.split(filename)[1])
             os.remove(filename) 
 
-        if self.dasystem.has_key('random.seed.init'):
+        if 'random.seed.init' in self.dasystem:
             self.read_random_seed(True)
 
     self.parse_times()
diff --git a/da/methane/obs.py b/da/methane/obs.py
index ba5bd4b1..e5bd46cb 100755
--- a/da/methane/obs.py
+++ b/da/methane/obs.py
@@ -56,7 +56,7 @@ class MethaneObservations(Observations):
         if not os.path.exists(filename):
             msg = 'Could not find  the required observation input file (%s) ' % filename
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.obs_filename = filename
         self.datalist = []
@@ -120,7 +120,7 @@ class MethaneObservations(Observations):
             logging.error(msg)
             logging.error("Did the sampling step succeed?")
             logging.error("...exiting")
-            raise IOError, msg
+            raise IOError(msg)
 
         ncf = io.ct_read(filename, method='read')
         ids = ncf.get_variable('obs_num')
@@ -266,7 +266,7 @@ class MethaneObservations(Observations):
         if not os.path.exists(filename):
             msg = 'Could not find the required sites.rc input file (%s)' % filename
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.sites_file = filename
 
@@ -308,7 +308,7 @@ class MethaneObservations(Observations):
 
         for obs in self.datalist:
             obs.mdm = 1000.0  # default is very high model-data-mismatch, until explicitly set by script
-            if site_info.has_key(obs.code): 
+            if obs.code in site_info:
                 logging.debug("Observation found (%s)" % obs.code)
                 obs.mdm = site_info[obs.code]['error'] * self.global_R_scaling
                 obs.may_localize = site_info[obs.code]['may_localize']
diff --git a/da/methane/pipeline.py b/da/methane/pipeline.py
index a644d0a2..a15d40f0 100755
--- a/da/methane/pipeline.py
+++ b/da/methane/pipeline.py
@@ -55,18 +55,18 @@ def forward_pipeline(dacycle, platform, dasystem, samples, statevector, obsopera
     logging.info(header + "Initializing current cycle" + footer)
     start_job(dacycle, dasystem, platform, statevector, samples, obsoperator)                   
 
-    if dacycle.has_key('forward.savestate.exceptsam'):
+    if 'forward.savestate.exceptsam' in dacycle:
         sam = (dacycle['forward.savestate.exceptsam'].upper() in ["TRUE","T","YES","Y"])
     else:
         sam = False
 
-    if dacycle.has_key('forward.savestate.dir'):
+    if 'forward.savestate.dir' in dacycle:
         fwddir = dacycle['forward.savestate.dir']
     else:
         logging.debug("No forward.savestate.dir key found in rc-file, proceeding with self-constructed prior parameters")
         fwddir = False
 
-    if dacycle.has_key('forward.savestate.legacy'):
+    if 'forward.savestate.legacy' in dacycle:
         legacy = (dacycle['forward.savestate.legacy'].upper() in ["TRUE","T","YES","Y"])
     else:
         legacy = False
@@ -162,7 +162,7 @@ def analysis_pipeline(dacycle, platform, dasystem, samples, statevector):
 def archive_pipeline(dacycle, platform, dasystem):
     """ Main entry point for archiving of output from one disk/system to another """
 
-    if not dacycle.has_key('task.rsync'):
+    if 'task.rsync' not in dacycle:
         logging.info('rsync task not found, not starting automatic backup...')
         return
     else:
@@ -353,7 +353,7 @@ def invert(dacycle, statevector, optimizer):
                   statevector.nparams,  # Aki: instead of reading from rc
                   statevector.nobs)
 
-    if not dacycle.dasystem.has_key('opt.algorithm'):
+    if 'opt.algorithm' not in dacycle.dasystem:
         logging.info("There was no minimum least squares algorithm specified in the DA System rc file (key : opt.algorithm)") 
         logging.info("...using serial algorithm as default...")
         optimizer.set_algorithm('Serial')
diff --git a/da/platform/capegrim.py b/da/platform/capegrim.py
index 1a5e69b6..269a3a3e 100755
--- a/da/platform/capegrim.py
+++ b/da/platform/capegrim.py
@@ -77,12 +77,12 @@ class CapeGrimPlatform(Platform):
             template += """#$ -hold_jid depends \n"""
 
         # First replace from passed dictionary
-        for k, v in joboptions.iteritems():
+        for k, v in joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
         # Fill remaining values with std_options
-        for k, v in std_joboptions.iteritems():
+        for k, v in std_joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
diff --git a/da/platform/cartesius.py b/da/platform/cartesius.py
index b6a6fb41..b1daa0aa 100755
--- a/da/platform/cartesius.py
+++ b/da/platform/cartesius.py
@@ -108,12 +108,12 @@ class CartesiusPlatform(Platform):
             template += """#$ -hold_jid depends \n"""
 
         # First replace from passed dictionary
-        for k, v in joboptions.iteritems():
+        for k, v in joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
         # Fill remaining values with std_options
-        for k, v in std_joboptions.iteritems():
+        for k, v in std_joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
diff --git a/da/platform/fmi.py b/da/platform/fmi.py
index c3458abe..02e73307 100755
--- a/da/platform/fmi.py
+++ b/da/platform/fmi.py
@@ -77,12 +77,12 @@ class FmiPlatform(Platform):
             template += """#$ -hold_jid depends \n"""
 
         # First replace from passed dictionary
-        for k,v in joboptions.iteritems():
+        for k,v in joboptions.items():
             while k in template:
                 template = template.replace(k,v)
 
         # Fill remaining values with std_options
-        for k,v in std_joboptions.iteritems():
+        for k,v in std_joboptions.items():
             while k in template:
                 template = template.replace(k,v)
 
diff --git a/da/platform/huygens.py b/da/platform/huygens.py
index a30b6e86..fc9c5166 100755
--- a/da/platform/huygens.py
+++ b/da/platform/huygens.py
@@ -115,12 +115,12 @@ class HuygensPlatform(Platform):
             template += """#$ -hold_jid depends \n"""
 
         # First replace from passed dictionary
-        for k, v in joboptions.iteritems():
+        for k, v in joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
         # Fill remaining values with std_options
-        for k, v in std_joboptions.iteritems():
+        for k, v in std_joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
diff --git a/da/platform/jet.py b/da/platform/jet.py
index 44e2ac20..d28fcf5b 100755
--- a/da/platform/jet.py
+++ b/da/platform/jet.py
@@ -58,12 +58,12 @@ class JetPlatform(Platform):
             template += """#$ -sync y\n"""
 
         # First replace from passed dictionary
-        for k, v in joboptions.iteritems():
+        for k, v in joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
         # Fill remaining values with std_options
-        for k, v in std_joboptions.iteritems():
+        for k, v in std_joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
diff --git a/da/platform/kermadec.py b/da/platform/kermadec.py
index 6dd93104..cbad74bf 100755
--- a/da/platform/kermadec.py
+++ b/da/platform/kermadec.py
@@ -74,12 +74,12 @@ class KermadecPlatform(Platform):
             template += """#$ -hold_jid depends \n"""
 
         # First replace from passed dictionary
-        for k, v in joboptions.iteritems():
+        for k, v in joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
         # Fill remaining values with std_options
-        for k, v in std_joboptions.iteritems():
+        for k, v in std_joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
diff --git a/da/platform/maunaloa.py b/da/platform/maunaloa.py
index 7dd5ba91..0f5919f1 100755
--- a/da/platform/maunaloa.py
+++ b/da/platform/maunaloa.py
@@ -71,12 +71,12 @@ class MaunaloaPlatform(Platform):
             template += """#$ -hold_jid depends \n"""
 
         # First replace from passed dictionary
-        for k, v in joboptions.iteritems():
+        for k, v in joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
         # Fill remaining values with std_options
-        for k, v in std_joboptions.iteritems():
+        for k, v in std_joboptions.items():
             while k in template:
                 template = template.replace(k, v)
 
diff --git a/da/sf6/pipeline.py b/da/sf6/pipeline.py
index f5f918b9..4767d299 100755
--- a/da/sf6/pipeline.py
+++ b/da/sf6/pipeline.py
@@ -56,13 +56,13 @@ def forward_pipeline(dacycle, platform, dasystem, samples, statevector, obsopera
     logging.info(header + "Initializing current cycle" + footer)
     start_job(dacycle, dasystem, platform, statevector, samples, obsoperator)                   
 
-    if dacycle.has_key('forward.savestate.dir'):
+    if 'forward.savestate.dir' in dacycle:
         fwddir = dacycle['forward.savestate.dir']
     else:
         logging.debug("No forward.savestate.dir key found in rc-file, proceeding with self-constructed prior parameters")
         fwddir = False
 
-    if dacycle.has_key('forward.savestate.legacy'):
+    if 'forward.savestate.legacy' in dacycle:
         legacy = (dacycle['forward.savestate.legacy'].upper() in ["TRUE","T","YES","Y"])
     else:
         legacy = False
@@ -168,7 +168,7 @@ def analysis_pipeline(dacycle, platform, dasystem, samples, statevector, obsoper
 def archive_pipeline(dacycle, platform, dasystem):
     """ Main entry point for archiving of output from one disk/system to another """
 
-    if not dacycle.has_key('task.rsync'):
+    if 'task.rsync' not in dacycle:
         logging.info('rsync task not found, not starting automatic backup...')
         return
     else:
@@ -356,7 +356,7 @@ def invert(dacycle, statevector, optimizer):
                   int(dacycle.dasystem['nparameters']),
                   statevector.nobs)
 
-    if not dacycle.dasystem.has_key('opt.algorithm'):
+    if 'opt.algorithm' not in dacycle.dasystem:
         logging.info("There was no minimum least squares algorithm specified in the DA System rc file (key : opt.algorithm)") 
         logging.info("...using serial algorithm as default...")
         optimizer.set_algorithm('Serial')
diff --git a/da/stilt/expand_molefractions.py b/da/stilt/expand_molefractions.py
index a9fb83a3..bb1dfba8 100755
--- a/da/stilt/expand_molefractions.py
+++ b/da/stilt/expand_molefractions.py
@@ -98,7 +98,7 @@ def write_mole_fractions(dacycle):
         fc_simulated = ncf_fc_in.get_variable('modelsamplesmean_prior')
         fc_simulated_ens = ncf_fc_in.get_variable('modelsamplesdeviations_prior')
         fc_flag      = ncf_fc_in.get_variable('flag')
-        if not dacycle.dasystem.has_key('opt.algorithm'):
+        if 'opt.algorithm' not in dacycle.dasystem:
             fc_r         = ncf_fc_in.get_variable('modeldatamismatchvariance')
             fc_hphtr     = ncf_fc_in.get_variable('totalmolefractionvariance')
         elif dacycle.dasystem['opt.algorithm'] == 'serial':
@@ -161,10 +161,10 @@ def write_mole_fractions(dacycle):
 
             # get nobs dimension
 
-            if ncf_out.dimensions.has_key('id'): 
+            if 'id' in ncf_out.dimensions:
                 dimidob = ncf_out.dimensions['id']
                 dimid = ('id',)
-            elif ncf_out.dimensions.has_key('obs'): 
+            elif 'obs' in ncf_out.dimensions:
                 dimidob = ncf_out.dimensions['obs']
                 dimid = ('obs',)
 
@@ -262,11 +262,11 @@ def write_mole_fractions(dacycle):
 
         # Get existing file obs_nums to determine match to local obs_nums
 
-        if ncf_out.variables.has_key('id'):
+        if 'id' in ncf_out.variables:
             file_obs_nums = ncf_out.get_variable('id')
-        elif ncf_out.variables.has_key('ccgg_evn'):
+        elif 'ccgg_evn' in ncf_out.variables:
             file_obs_nums = ncf_out.get_variable('ccgg_evn')
-        elif ncf_out.variables.has_key('obspack_num'):
+        elif 'obspack_num' in ncf_out.variables:
             file_obs_nums = ncf_out.get_variable('obspack_num')
 
         # Get all obs_nums related to this file, determine their indices in the local arrays
diff --git a/da/stilt/obspack.py b/da/stilt/obspack.py
index 3387d5e7..da8b708a 100755
--- a/da/stilt/obspack.py
+++ b/da/stilt/obspack.py
@@ -53,7 +53,7 @@ class ObsPackObservations(Observations):
         if not os.path.exists(op_dir):
             msg = 'Could not find  the required ObsPack distribution (%s) ' % op_dir
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.obspack_dir = op_dir
             self.obspack_id = op_id
@@ -152,7 +152,7 @@ class ObsPackObservations(Observations):
             logging.error(msg)
             logging.error("Did the sampling step succeed?")
             logging.error("...exiting")
-            raise IOError, msg
+            raise IOError(msg)
 
         ncf = io.ct_read(filename, method='read')
         ids = ncf.get_variable('obs_num')
@@ -364,7 +364,7 @@ class ObsPackObservations(Observations):
         if not os.path.exists(filename):
             msg = 'Could not find  the required sites.rc input file (%s) ' % filename
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         else:
             self.sites_file = filename
 
@@ -444,8 +444,8 @@ class ObsPackObservations(Observations):
 
 
 
-            if site_info.has_key(identifier):
-                if site_foot.has_key(identifier):
+            if identifier in site_info:
+                if identifier in site_foot:
                     path_foot  = site_foot[identifier]
                     dir_foot = os.path.join(path_foot,'%s'%obs.xdate.year,'%02d'%obs.xdate.month)
                     files_foot = os.listdir(dir_foot)
@@ -458,7 +458,7 @@ class ObsPackObservations(Observations):
                     else:
                         exclude_footprint = True
                         logging.info("id not in footprint %s" %str_id)
-                if site_hourly.has_key(identifier):
+                if identifier in site_hourly:
                     obs.samplingstrategy = 2
                     hourf, hourt = site_hourly[identifier]
                     if int(obs.xdate.hour+obs.utc2loc) > hourf and int(obs.xdate.hour+obs.utc2loc) < hourt:
@@ -495,7 +495,7 @@ class ObsPackObservations(Observations):
             else:
                 logging.warning("Site NOT found (%s, %d), please check sites.rc file (%s)  !!!" % (identifier, obs.id, self.sites_file))
 
-            if site_move.has_key(identifier):
+            if identifier in site_move:
 
                 movelat, movelon = site_move[identifier]
                 obs.lat = obs.lat + movelat
@@ -503,7 +503,7 @@ class ObsPackObservations(Observations):
 
                 logging.warning("Observation location for (%s, %d), is moved by %3.2f degrees latitude and %3.2f degrees longitude" % (identifier, obs.id, movelat, movelon))
 
-            if site_incalt.has_key(identifier):
+            if identifier in site_incalt:
 
                 incalt = site_incalt[identifier]
                 obs.height = obs.height + incalt
diff --git a/da/stilt/pipeline.py b/da/stilt/pipeline.py
index 1316d64a..eb150469 100755
--- a/da/stilt/pipeline.py
+++ b/da/stilt/pipeline.py
@@ -57,18 +57,18 @@ def forward_pipeline(dacycle, platform, dasystem, samples, statevector, obsopera
     logging.info(header + "Initializing current cycle" + footer)
     start_job(dacycle, dasystem, platform, statevector, samples, obsoperator)
 
-    if dacycle.has_key('forward.savestate.exceptsam'):
+    if 'forward.savestate.exceptsam' in dacycle:
         sam = (dacycle['forward.savestate.exceptsam'].upper() in ["TRUE","T","YES","Y"])
     else:
         sam = False
 
-    if dacycle.has_key('forward.savestate.dir'):
+    if 'forward.savestate.dir' in dacycle:
         fwddir = dacycle['forward.savestate.dir']
     else:
         logging.debug("No forward.savestate.dir key found in rc-file, proceeding with self-constructed prior parameters")
         fwddir = False
 
-    if dacycle.has_key('forward.savestate.legacy'):
+    if 'forward.savestate.legacy' in dacycle:
         legacy = (dacycle['forward.savestate.legacy'].upper() in ["TRUE","T","YES","Y"])
     else:
         legacy = False
@@ -184,7 +184,7 @@ def analysis_pipeline(dacycle, platform, dasystem, samples, statevector):
 def archive_pipeline(dacycle, platform, dasystem):
     """ Main entry point for archiving of output from one disk/system to another """
 
-    if not dacycle.has_key('task.rsync'):
+    if 'task.rsync' not in dacycle:
         logging.info('rsync task not found, not starting automatic backup...')
         return
     else:
@@ -400,7 +400,7 @@ def invert(dacycle, statevector, optimizer):
                   int(dacycle.dasystem['nparameters']),
                   statevector.nobs)
 
-    if not dacycle.dasystem.has_key('opt.algorithm'):
+    if 'opt.algorithm' not in dacycle.dasystem:
         logging.info("There was no minimum least squares algorithm specified in the DA System rc file (key : opt.algorithm)")
         logging.info("...using serial algorithm as default...")
         optimizer.set_algorithm('Serial')
diff --git a/da/stilt/statevector.py b/da/stilt/statevector.py
index 65198b9c..654b39aa 100755
--- a/da/stilt/statevector.py
+++ b/da/stilt/statevector.py
@@ -59,7 +59,7 @@ class CO2GriddedStateVector(StateVector):
             if not os.path.exists(file):
                 msg = "Cannot find the specified file %s" % file
                 logging.error(msg)
-                raise IOError, msg
+                raise IOError(msg)
             else:
                 logging.debug("Using covariance file: %s" % file)
 
diff --git a/da/tm5/methaneobservationoperator.py b/da/tm5/methaneobservationoperator.py
index 434e9a4f..49cb3f9b 100755
--- a/da/tm5/methaneobservationoperator.py
+++ b/da/tm5/methaneobservationoperator.py
@@ -96,7 +96,7 @@ def validate_rc_methane(self):
         if not self.tm_settings.has_key(key):
             msg = 'Missing a required value in rc-file : %s' % key
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
     logging.debug('rc-file has been validated succesfully')
 
 TM5ObservationOperator.validate_rc = validate_rc_methane
diff --git a/da/tm5/observationoperator.py b/da/tm5/observationoperator.py
index 3713b5af..fa15e241 100755
--- a/da/tm5/observationoperator.py
+++ b/da/tm5/observationoperator.py
@@ -332,7 +332,7 @@ class TM5ObservationOperator(ObservationOperator):
             if not self.tm_settings.has_key(key):
                 msg = 'Missing a required value in rc-file : %s' % key
                 logging.error(msg)
-                raise IOError, msg
+                raise IOError(msg)
         logging.debug('rc-file has been validated succesfully')
 
 
@@ -383,7 +383,7 @@ class TM5ObservationOperator(ObservationOperator):
         if not os.path.exists(datadir):
             msg = "The specified input directory for the TM5 model to read from does not exist (%s), exiting..." % datadir 
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
 
         datafiles = os.listdir(datadir)
 
@@ -393,14 +393,14 @@ class TM5ObservationOperator(ObservationOperator):
             msg = "The specified obs input file for the TM5 model to read from does not exist (%s), exiting..." % obsfile  
             logging.error(msg)
             if not self.dacycle.has_key('forward.savestate.dir'): 
-                raise IOError, msg
+                raise IOError(msg)
 
         for n in range(int(self.dacycle['da.optimizer.nmembers'])):
             paramfile = 'parameters.%03d.nc' % n
             if paramfile not in datafiles:
                 msg = "The specified parameter input file for the TM5 model to read from does not exist (%s), exiting..." % paramfile 
                 logging.error(msg)
-                raise IOError, msg
+                raise IOError(msg)
 
         # Next, make sure there is an actual model version compiled and ready to execute
 
diff --git a/da/tools/general.py b/da/tools/general.py
index d74c28ab..79c7bcad 100755
--- a/da/tools/general.py
+++ b/da/tools/general.py
@@ -48,7 +48,7 @@ WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
 def validate_rc(rcfile, needed_items):
     """ validate the contents of an rc-file given a dictionary of required keys """
 
-    for k, v in rcfile.iteritems():
+    for k, v in rcfile.items():
         if v == 'True' :
             rcfile[k] = True
         if v == 'False':
@@ -60,7 +60,7 @@ def validate_rc(rcfile, needed_items):
         if not rcfile.has_key(key):
             msg = 'Missing a required value in rc-file : %s' % key
             logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
     logging.debug('rc-file has been validated succesfully')  
 
 
@@ -156,14 +156,14 @@ def name_convert(name=None, to=None):
         except:
             return ""
 
-        platform_num = [ v for k, v in platform_dict.iteritems() if platform.lower() == k ]
+        platform_num = [ v for k, v in platform_dict.items() if platform.lower() == k ]
         if len(platform_num) == 0:
-            print '%s: Platform %s not found in platform dict.' % (identifier, platform)
+            print('%s: Platform %s not found in platform dict.' % (identifier, platform))
             return ""
 
-        strategy_char = [ v for k, v in strategy_dict.iteritems() if strategy.lower() == k ]
+        strategy_char = [ v for k, v in strategy_dict.items() if strategy.lower() == k ]
         if len(strategy_char) == 0:
-            print '%s: Strategy %s not found in strategy dict.' % (identifier, strategy)
+            print('%s: Strategy %s not found in strategy dict.' % (identifier, strategy))
             return "" 
         return "%s_%2.2d%1s%1d" % (code, lab_num, strategy_char[0].upper(), int(platform_num[0]))
 
@@ -181,15 +181,15 @@ def name_convert(name=None, to=None):
         except:
             return ""
 
-        platform = [ k for k, v in platform_dict.iteritems() if v == platform_num ]
+        platform = [ k for k, v in platform_dict.items() if v == platform_num ]
         if len(platform) == 0:
-            print '%s: Platform number %s not found in platform dict.' % (identifier, platform_num)
+            print('%s: Platform number %s not found in platform dict.' % (identifier, platform_num))
             return ""
         
         pattern = re.compile(strategy_char, re.IGNORECASE)
-        strategy = [ k for k, v in strategy_dict.iteritems() if pattern.search(v) ]
+        strategy = [ k for k, v in strategy_dict.items() if pattern.search(v) ]
         if len(strategy) == 0:
-            print '%s: Strategy character %s not found in strategy list.' % (identifier, strategy_char)
+            print('%s: Strategy character %s not found in strategy list.' % (identifier, strategy_char))
             return ""
         return "%s_%s-%s_%d" % (code, platform[0], strategy[0], lab_num)
 
@@ -247,7 +247,7 @@ def date2num(d):
     practice.  For details, see the module docstring.
     """
     try: 
-	return np.asarray([_to_ordinalf(val) for val in d])
+        return np.asarray([_to_ordinalf(val) for val in d])
     except:
         return _to_ordinalf(d)
 
diff --git a/da/tools/initexit.py b/da/tools/initexit.py
index a3878c54..86ae19f9 100755
--- a/da/tools/initexit.py
+++ b/da/tools/initexit.py
@@ -73,9 +73,9 @@ import glob
 import shutil
 import copy
 import getopt
-import cPickle
+import pickle
 import numpy as np
-from string import join
+#from string import join
 
 import da.tools.rc as rc
 from da.tools.general import create_dirs, to_datetime, advance_time
@@ -138,7 +138,7 @@ class CycleControl(dict):
         """
 
         rcdata = rc.read(rcfilename)
-        for k, v in rcdata.iteritems():
+        for k, v in rcdata.items():
             self[k] = v
 
         logging.info('DA Cycle rc-file (%s) loaded successfully' % rcfilename)
@@ -150,7 +150,7 @@ class CycleControl(dict):
         Currently required keys are :attr:`~da.tools.initexit.needed_da_items`
         """
 
-        for k, v in self.iteritems():
+        for k, v in self.items():
             if v in ['True', 'true', 't', 'T', 'y', 'yes']:
                 self[k] = True
             if v in ['False', 'false', 'f', 'F', 'n', 'no']:
@@ -160,14 +160,14 @@ class CycleControl(dict):
             if k in ['time.start', 'time.end', 'time.finish', 'da.restart.tstamp']:
                 self[k] = to_datetime(v)
         for key in needed_da_items:
-            if not self.has_key(key):
+            if key not in self:
                 msg = 'Missing a required value in rc-file : %s' % key
                 logging.error(msg)
                 logging.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ')
                 logging.error('Please note the update on Dec 02 2011 where rc-file names for DaSystem and ')
                 logging.error('are from now on specified in the main rc-file (see da/rc/da.rc for example)')
                 logging.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ')
-                raise IOError, msg
+                raise IOError(msg)
         logging.debug('DA Cycle settings have been validated succesfully')
 
     def parse_times(self):
@@ -257,7 +257,7 @@ class CycleControl(dict):
         filename = os.path.join(self['dir.restart'], 'randomseed_%s.pickle' % self['time.start'].strftime('%Y%m%d'))
         f = open(filename, 'wb')
         seed = np.random.get_state()
-        cPickle.dump(seed, f, -1)
+        pickle.dump(seed, f, -1)
         f.close()
 
         logging.info("Saved the random seed generator values to file")
@@ -271,7 +271,7 @@ class CycleControl(dict):
             filename = os.path.join(self['dir.restart'], 'randomseed_%s.pickle' % self['da.restart.tstamp'].strftime('%Y%m%d'))
             logging.info("Retrieved the random seed generator values of last cycle from file")
         f = open(filename, 'rb')
-        seed = cPickle.load(f)
+        seed = pickle.load(f)
         np.random.set_state(seed)
         f.close()
 
@@ -334,7 +334,7 @@ class CycleControl(dict):
             self['jobrcfilename'] = os.path.join(self['dir.exec'], strippedname)
 #            shutil.copy(os.path.join(self.dasystem['regionsfile']),os.path.join(self['dir.exec'],'da','analysis','copied_regions.nc'))
             logging.info('Copied regions file to the analysis directory: %s'%os.path.join(self.dasystem['regionsfile'])) 
-            if self.dasystem.has_key('extendedregionsfile'):
+            if 'extendedregionsfile' in self.dasystem:
                 shutil.copy(os.path.join(self.dasystem['extendedregionsfile']),os.path.join(self['dir.exec'],'da','analysis','copied_regions_extended.nc')) 
                 logging.info('Copied extended regions file to the analysis directory: %s'%os.path.join(self.dasystem['extendedregionsfile'])) 
             else: 
@@ -346,7 +346,7 @@ class CycleControl(dict):
             for filename in glob.glob(os.path.join(self['dir.exec'],'*.pickle')):
                 logging.info('Deleting pickle file %s to make sure the correct regions are used'%os.path.split(filename)[1])
                 os.remove(filename) 
-            if self.dasystem.has_key('random.seed.init'):
+            if 'random.seed.init' in self.dasystem:
                 self.read_random_seed(True)
 
         self.parse_times()
@@ -554,7 +554,7 @@ class CycleControl(dict):
             if '-t' in self.opts:
                 (self.opts).remove('-t') 
 
-            if not os.environ.has_key('icycle_in_job'):
+            if 'icycle_in_job' not in os.environ:
                 logging.info('Environment variable icycle_in_job not found, resubmitting after this cycle')
                 os.environ['icycle_in_job'] = self['da.resources.ncycles_per_job']  # assume that if no cycle number is set, we should submit the next job by default
             else:
@@ -567,14 +567,14 @@ class CycleControl(dict):
                 nextlogfilename = logfile.replace(jobid,nextjobid)
                 if self.daplatform.ID == 'WU capegrim':
-                    template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s &\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,)
+                    template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s &\n' % (cycle+1,execcommand, nextrestartfilename, ''.join(self.opts), nextlogfilename,)
-            	else: 
-                    template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s\n' % (cycle+1,execcommand, nextrestartfilename, join(self.opts, ''), nextlogfilename,) 
+                else: 
+                    template += '\nexport icycle_in_job=%d\npython %s rc=%s %s >&%s\n' % (cycle+1,execcommand, nextrestartfilename, ''.join(self.opts), nextlogfilename,)
 
             # write and submit 
             self.daplatform.write_job(jobfile, template, jobid)
-	    if 'da.resources.ncycles_per_job' in self:
-		do_submit = (int(os.environ['icycle_in_job']) >= int(self['da.resources.ncycles_per_job']))
-	    else:
+            if 'da.resources.ncycles_per_job' in self:
+                do_submit = (int(os.environ['icycle_in_job']) >= int(self['da.resources.ncycles_per_job']))
+            else:
-                dosubmit = False
+                do_submit = False
           
             if do_submit:
@@ -615,7 +615,7 @@ def parse_options():
     args = []
     try:                                
         opts, args = getopt.gnu_getopt(sys.argv[1:], "-rvt")
-    except getopt.GetoptError, msg:           
+    except getopt.GetoptError as msg:           
         logging.error('%s' % msg)
         sys.exit(2)      
 
@@ -646,7 +646,7 @@ def parse_options():
             key, arg = item.split('=')
         else:
             logging.error('%s' % 'Argument passed without description (%s)' % item)
-            raise getopt.GetoptError, arg
+            raise getopt.GetoptError(item)
 
         arguments[key] = arg
 
@@ -659,14 +659,14 @@ def validate_opts_args(opts, args):
  the specified rc-file.  
  
     """
-    if not args.has_key("rc"):
+    if "rc" not in args:
         msg = "There is no rc-file specified on the command line. Please use rc=yourfile.rc"
         logging.error(msg)
-        raise IOError, msg
+        raise IOError(msg)
     elif not os.path.exists(args['rc']):
         msg = "The specified rc-file (%s) does not exist " % args['rc'] 
         logging.error(msg)
-        raise IOError, msg
+        raise IOError(msg)
 
     # WP not needed anymore
     #if not args.has_key('process'):
diff --git a/da/tools/io4.py b/da/tools/io4.py
index 59ff0467..45dab0a9 100755
--- a/da/tools/io4.py
+++ b/da/tools/io4.py
@@ -21,7 +21,7 @@ File created on 15 Oct 2008.
 File modified for CT data assimilation system in July 2010, Wouter Peters
 
 """
-import standardvariables
+from . import standardvariables
 import netCDF4
 #import pyhdf.SD as hdf
 import datetime as dt
@@ -34,9 +34,9 @@ disclaimer = "This data belongs to the CarbonTracker project"
 email = "wouter.peters@wur.nl"
 url = "http://carbontracker.wur.nl"
 institution = "Wageningen University and Research Center"
-source 	= "CarbonTracker release 2.0" 
+source = "CarbonTracker release 2.0"
 conventions = "CF-1.1"
-historytext	= 'created on '+dt.datetime.now().strftime('%B %d, %Y')+' by %s'%os.environ['USER']
+historytext = 'created on '+dt.datetime.now().strftime('%B %d, %Y')+' by %s'%os.environ['USER']
 
 std_savedict={'name':'unknown','values':[],'dims':(0,0,),'units':'','long_name':'','comment':''}
 
@@ -60,7 +60,7 @@ class CT_CDF(netCDF4.Dataset):
     def __init__(self,filename, method='read'):
 
         if method not in ['read','write','create']:
-            raise ValueError, 'Method %s is not defined for a CarbonTracker NetCDF file object' % method
+            raise ValueError('Method %s is not defined for a CarbonTracker NetCDF file object' % method)
 
         if method == 'read':
             try:
@@ -167,7 +167,7 @@ class CT_CDF(netCDF4.Dataset):
         from da.analysis.tools_regions import ext_econams, ext_ecocomps
 
         if type not in ['eco','eco_ext','tc','tc_ext','olson']: 
-            raise ValueError,'Type of dimension for regions requested (%s) is not possible' %type
+            raise ValueError('Type of dimension for regions requested (%s) is not possible' %type)
 
         dimname='regions_%s' % type
 
@@ -232,9 +232,9 @@ class CT_CDF(netCDF4.Dataset):
 
     def has_date(self,dd):
 
-        if not self.dimensions.has_key('date'): 
+        if 'date' not in self.dimensions:
             return False
-        if not self.variables.has_key('date'): 
+        if 'date' not in self.variables:
             return False
-        if self.dimensions['date'].isunlimited:
+        if self.dimensions['date'].isunlimited():
             if dd in self.get_variable('date').tolist():
@@ -258,7 +258,7 @@ class CT_CDF(netCDF4.Dataset):
 
     def standard_var(self,varname):
         """ return properties of standard variables """
-        import standardvariables
+        from . import standardvariables
 
         if varname in standardvariables.standard_variables.keys():
             return standardvariables.standard_variables[varname]
@@ -269,7 +269,7 @@ class CT_CDF(netCDF4.Dataset):
         """ return lenght of unlimited dimenion(s) """
 
         unlims=()
-        for dimname, dimobj in self.dimensions.iteritems():
+        for dimname, dimobj in self.dimensions.items():
             if dimobj.isunlimited() : unlims += (len(dimobj),)
 
         return unlims
@@ -277,7 +277,7 @@ class CT_CDF(netCDF4.Dataset):
     def has_unlimlen(self,dims):
         """ return T/F whether dimensions include an unlimited dimenion(s) """
 
-        for dimname, dimobj in self.dimensions.iteritems():
+        for dimname, dimobj in self.dimensions.items():
             if dimname in dims:
                 if dimobj.isunlimited() : return True
 
@@ -289,12 +289,12 @@ class CT_CDF(netCDF4.Dataset):
 
         existing_vars=self.variables
 
-        if existing_vars.has_key(datadict['name']):
+        if datadict['name'] in existing_vars:
             return
         else:
-            if not silent: print 'Creating new dataset: '+datadict['name']
+            if not silent: print('Creating new dataset: '+datadict['name'])
 
-            if datadict.has_key('dtype'):
+            if 'dtype' in datadict:
                 if datadict['dtype'] == 'int':
                     var = self.createVariable(datadict['name'],'i4',datadict['dims'])
                 elif datadict['dtype'] == 'int64':
@@ -310,7 +310,7 @@ class CT_CDF(netCDF4.Dataset):
             else:
                 var = self.createVariable(datadict['name'],'f4',datadict['dims'])
 
-            for k,v in datadict.iteritems(): 
+            for k,v in datadict.items(): 
                 if k not in ['name','dims','values','_FillValue','count']: 
                     var.setncattr(k,v)
 
@@ -327,7 +327,7 @@ class CT_CDF(netCDF4.Dataset):
             next=0
 
 
-        if existing_vars.has_key(datadict['name']):
+        if datadict['name'] in existing_vars:
             var = self.variables[datadict['name']] 
             ndims = var.ndim
 
@@ -344,13 +344,13 @@ class CT_CDF(netCDF4.Dataset):
             elif ndims == 5:
                 var[next:next+nsets,:,:,:,:]=datadict['values']
             else:
-                print 'More than 5 dimensions in array not implemented yet'
+                print('More than 5 dimensions in array not implemented yet')
                 raise ValueError
 
         else:
-            if not silent: print 'Creating new dataset: '+datadict['name']
+            if not silent: print('Creating new dataset: '+datadict['name'])
 
-            if datadict.has_key('dtype'):
+            if 'dtype' in datadict:
                 if datadict['dtype'] == 'int':
                     var = self.createVariable(datadict['name'],'i4',datadict['dims'])#,fill_value=datadict['_FillValue'])
                 elif datadict['dtype'] == 'int64':
@@ -366,7 +366,7 @@ class CT_CDF(netCDF4.Dataset):
             else:
                 var = self.createVariable(datadict['name'],'f4',datadict['dims'])#,fill_value=datadict['_FillValue'])
 
-            for k,v in datadict.iteritems(): 
+            for k,v in datadict.items(): 
                 if k not in ['name','dims','values','_FillValue','count']: 
                     var.setncattr(k,v)
 
@@ -386,7 +386,7 @@ class CT_CDF(netCDF4.Dataset):
                 elif ndims == 5:
                     var[next:next+nsets,:,:,:,:]=datadict['values']
                 else:
-                    print 'More than 5 dimensions in array not implemented yet'
+                    print('More than 5 dimensions in array not implemented yet')
                     raise ValueError
             else:
                 ndims = var.ndim
@@ -396,47 +396,47 @@ class CT_CDF(netCDF4.Dataset):
                 var[:] = datadict['values']
 
 try:
-	import pyhdf.SD as hdf
-	class CT_HDF(hdf.SD):
-	    """ function opens a HDF file for reading """
-
-	    def __init__(self,filename, method='read'):
-
-		if method in ['write','create']:
-		    raise ValueError, 'Method %s is not defined for a CarbonTracker HDF file object' % method
-
-		if method == 'read':
-		    #print 'Reading from file'
-		    try:
-			super(CT_HDF,self).__init__(filename) 
-		    except hdf.HDF4Error: 
-			msg = 'Requested file not found for opening: %s'%filename ; logging.error(msg)
-			msg = "Exiting" ; logging.info(msg)
-			sys.exit(2)
-
-	    def get_variable(self,varname):
-		""" get variable from ncf file"""
-		return self.select(varname).get()
-
-	    def get_attribute(self,attname):
-		""" get attribute from ncf file"""
-		return getattr(self,attname)
-
-	    def standard_var(self,varname):
-		""" return properties of standard variables """
-		import standardvariables
-
-		if varname in standardvariables.standard_variables.keys():
-		    return standardvariables.standard_variables[varname]
-		else:
-		    return standardvariables.standard_variables['unknown']
+        import pyhdf.SD as hdf
+        class CT_HDF(hdf.SD):
+            """ function opens a HDF file for reading """
+
+            def __init__(self,filename, method='read'):
+
+                if method in ['write','create']:
+                    raise ValueError('Method %s is not defined for a CarbonTracker HDF file object' % method)
+
+                if method == 'read':
+                    #print 'Reading from file'
+                    try:
+                        super(CT_HDF,self).__init__(filename) 
+                    except hdf.HDF4Error: 
+                        msg = 'Requested file not found for opening: %s'%filename ; logging.error(msg)
+                        msg = "Exiting" ; logging.info(msg)
+                        sys.exit(2)
+
+            def get_variable(self,varname):
+                """ get variable from ncf file"""
+                return self.select(varname).get()
+
+            def get_attribute(self,attname):
+                """ get attribute from ncf file"""
+                return getattr(self,attname)
+
+            def standard_var(self,varname):
+                """ return properties of standard variables """
+                from . import standardvariables
+
+                if varname in standardvariables.standard_variables.keys():
+                    return standardvariables.standard_variables[varname]
+                else:
+                    return standardvariables.standard_variables['unknown']
 
-	    def close(self):
-		""" close file"""
+            def close(self):
+                """ close file"""
 
-		return self.end()
+                return self.end()
 except:
-	print('IO Class CT_HDF not compiled, no HDF support!!!')
+        print('IO Class CT_HDF not compiled, no HDF support!!!')
 
 
 
@@ -468,7 +468,7 @@ if __name__ == '__main__':
     import numpy as np
 
     ncf=CT_CDF('test.nc','create')
-    print ncf.file_format
+    print(ncf.file_format)
     dimmembers=ncf.add_members_dim(200)
     dimparams=ncf.add_params_dim(200)
 
diff --git a/da/tools/pipeline.py b/da/tools/pipeline.py
index 2ceacc3a..91a763bd 100755
--- a/da/tools/pipeline.py
+++ b/da/tools/pipeline.py
@@ -56,18 +56,18 @@ def forward_pipeline(dacycle, platform, dasystem, samples, statevector, obsopera
     logging.info(header + "Initializing current cycle" + footer)
     start_job(dacycle, dasystem, platform, statevector, samples, obsoperator)                   
 
-    if dacycle.has_key('forward.savestate.exceptsam'):
+    if 'forward.savestate.exceptsam' in dacycle:
         sam = (dacycle['forward.savestate.exceptsam'].upper() in ["TRUE","T","YES","Y"])
     else:
         sam = False
 
-    if dacycle.has_key('forward.savestate.dir'):
+    if 'forward.savestate.dir' in dacycle:
         fwddir = dacycle['forward.savestate.dir']
     else:
         logging.debug("No forward.savestate.dir key found in rc-file, proceeding with self-constructed prior parameters")
         fwddir = False
 
-    if dacycle.has_key('forward.savestate.legacy'):
+    if 'forward.savestate.legacy' in dacycle:
         legacy = (dacycle['forward.savestate.legacy'].upper() in ["TRUE","T","YES","Y"])
     else:
         legacy = False
@@ -184,7 +184,7 @@ def analysis_pipeline(dacycle, platform, dasystem, samples, statevector):
 def archive_pipeline(dacycle, platform, dasystem):
     """ Main entry point for archiving of output from one disk/system to another """
 
-    if not dacycle.has_key('task.rsync'):
+    if not 'task.rsync' in dacycle:
         logging.info('rsync task not found, not starting automatic backup...')
         return
     else:
@@ -374,7 +374,7 @@ def invert(dacycle, statevector, optimizer):
                   int(dacycle.dasystem['nparameters']),
                   statevector.nobs)
 
-    if not dacycle.dasystem.has_key('opt.algorithm'):
+    if 'opt.algorithm' not in dacycle.dasystem:
         logging.info("There was no minimum least squares algorithm specified in the DA System rc file (key : opt.algorithm)") 
         logging.info("...using serial algorithm as default...")
         optimizer.set_algorithm('Serial')
diff --git a/da/tools/rc.py b/da/tools/rc.py
index 8f3fb539..5692ffa3 100755
--- a/da/tools/rc.py
+++ b/da/tools/rc.py
@@ -284,7 +284,7 @@ class RcFile(object) :
         # check ...
         if not os.path.exists(filename) :
             msg = 'rcfile not found : %s' % filename ; logging.error(msg)
-            raise IOError, msg
+            raise IOError(msg)
         #endif
         
         # store file name:
@@ -597,14 +597,14 @@ class RcFile(object) :
                     # remove enclosing characters:
                     key = pat.lstrip(start_mark).rstrip(close_mark)
                     # test some dictionaries for matching key:
-                    if self.values.has_key(key) :
+                    if key in self.values:
                         # get previously defined value:
                         val = self.values[key]
                         # substitute value:
                         line = line.replace(pat, val)
                         # set flag:
                         something_done = True
-                    elif os.environ.has_key(key) :
+                    elif key in os.environ:
                         # get value from environment:
                         val = os.environ[key]
                         # substitute value:
@@ -675,7 +675,7 @@ class RcFile(object) :
                     if not os.path.exists(inc_file) :
                         logging.error('include file not found : %s' % inc_file)
                         logging.error(linetrace)
-                        raise IOError, 'include file not found : %s' % inc_file
+                        raise IOError('include file not found : %s' % inc_file)
                     #endif
                     # read content:
                     inc_f = open(inc_file, 'r')
@@ -821,7 +821,7 @@ class RcFile(object) :
                 val = val.strip()
 
                 # already defined ?
-                if self.values.has_key(key) :
+                if key in self.values:
                     # this will occure often after the first pass since
                     # the keys are resolved again and again ;
                     # therefore, only complain if this definition is read
@@ -851,7 +851,7 @@ class RcFile(object) :
             #print '~~~ outfile ~~~~~~~~~~~~~~~~~~~~~~~'
             #for line in self.outfile : print line.strip()
             #print '~~~ key/values ~~~~~~~~~~~~~~~~~~~~'
-            #for k,v in self.iteritems() :
+            #for k,v in self.items() :
             #    print '%s  :  %s' % (k,v)
             ##endfor
             #print '-------------------------------------------------'
@@ -927,7 +927,7 @@ class RcFile(object) :
     def has_key(self, key) :
     
         # from dictionairy:
-        return self.values.has_key(key)
+        return key in self.values
         
     #enddef
     
@@ -963,7 +963,7 @@ class RcFile(object) :
         """
         
         # element found ?
-        if self.values.has_key(key) :
+        if key in self.values:
             # copy value:
             value = self.values[key]
             # convert ?
@@ -1096,7 +1096,7 @@ class RcFile(object) :
             # remove enclosing characters:
             key = pat.lstrip(start_mark).rstrip(close_mark)
             # test dictionary for matching key:
-            if self.values.has_key(key) :
+            if key in self.values:
                 # get previously defined value:
                 val = self.values[key]
                 # substitute value:
@@ -1121,7 +1121,7 @@ class RcFile(object) :
         f = open(filename, 'w')
 
         ## loop over key/value pairs:
-        #for k,v in self.iteritems():
+        #for k,v in self.items():
         #    # add line; at least the specified number of characters 
         #    # is used for the key:
         #    f.write( '%-20s:%s\n' % (k,v) )
@@ -1227,7 +1227,7 @@ if __name__ == '__main__':
     
     # print documentation ?
     if opts.doc :
-        print __doc__
+        print(__doc__)
         sys.exit(0)
     #endif
     
@@ -1248,7 +1248,7 @@ if __name__ == '__main__':
     
     # print pre-processed file ?
     if opts.write :
-        for line in rcf.outfile : print line.strip()
+        for line in rcf.outfile : print(line.strip())
         sys.exit(0)
     #endif
 
@@ -1260,7 +1260,7 @@ if __name__ == '__main__':
     rckey = args[1]
     
     # key present ?
     if rcf.has_key(rckey) :
 
         # print requested value:
         if opts.boolean :
@@ -1268,15 +1268,15 @@ if __name__ == '__main__':
             flag = rcf.get(rckey, 'bool')
             # print result:
             if flag :
-                print 'True'
+                print('True')
             else :
-                print 'False'
+                print('False')
             #endif
         else :
             # extract value:
             value = rcf.get(rckey)
             # display:
-            print value
+            print(value)
         #endif
         
     else :
@@ -1284,9 +1284,9 @@ if __name__ == '__main__':
         # default value provided ?
         if opts.default != None :
             # display:
-            print opts.default
+            print(opts.default)
         else :
-            print 'ERROR - key "%s" not found in rcfile "%s" and no default specified' % (rckey, rcfile)
+            print('ERROR - key "%s" not found in rcfile "%s" and no default specified' % (rckey, rcfile))
             sys.exit(1)
         #endif
 
diff --git a/da/tools/rc_old.py b/da/tools/rc_old.py
index f8c27e33..3d529590 100755
--- a/da/tools/rc_old.py
+++ b/da/tools/rc_old.py
@@ -742,7 +742,7 @@ class RcFile(object) :
             #print '~~~ outfile ~~~~~~~~~~~~~~~~~~~~~~~'
             #for line in self.outfile : print line.strip()
             #print '~~~ key/values ~~~~~~~~~~~~~~~~~~~~'
-            #for k,v in self.iteritems() :
+            #for k,v in self.items() :
             #    print '%s  :  %s' % (k,v)
             ##endfor
             #print '-------------------------------------------------'
@@ -986,7 +986,7 @@ class RcFile(object) :
         f = open(filename, 'w')
 
         ## loop over key/value pairs:
-        #for k,v in self.iteritems():
+        #for k,v in self.items():
         #    # add line; at least the specified number of characters 
         #    # is used for the key:
         #    f.write( '%-20s:%s\n' % (k,v) )
diff --git a/template.rc b/template.rc
index e7960d5a..8bd08d19 100644
--- a/template.rc
+++ b/template.rc
@@ -28,8 +28,8 @@
 !
 ! The time for which to start and end the data assimilation experiment in format YYYY-MM-DD HH:MM:SS
 
-time.start          : 2012-01-01 00:00:00
-time.finish         : 2012-01-15 00:00:00
+time.start          : 2016-01-01 00:00:00
+time.finish         : 2016-02-01 00:00:00
 
 ! Whether to restart the CTDAS system from a previous cycle, or to start the sequence fresh. Valid entries are T/F/True/False/TRUE/FALSE
 
@@ -37,7 +37,7 @@ time.restart        : False
 
 ! The length of a cycle is given in days, such that the integer 7 denotes the typically used weekly cycle. Valid entries are integers > 1
 
-time.cycle          : 1
+time.cycle          : 7
 
 ! The number of cycles of lag to use for a smoother version of CTDAS. CarbonTracker CO2 typically uses 5 weeks of lag. Valid entries are integers > 0
 
-- 
GitLab