diff --git a/gridded/da/baseclasses/dasystem.py b/gridded/da/baseclasses/dasystem.py
index 814fed76f29365e572f71b8a0ac6a9d30ef872e9..4da5f807309dfbe5b8f19ee7f045c210a1c35724 100755
--- a/gridded/da/baseclasses/dasystem.py
+++ b/gridded/da/baseclasses/dasystem.py
@@ -49,21 +49,20 @@ class DaSystem(dict):
         to the dictionary
         """
 
-        self.Identifier = 'CarbonTracker CO2'    # the identifier gives the platform name
+        self.ID = 'CarbonTracker CO2'    # the identifier gives the DA system name
         self.load_rc(rcfilename)
 
         logging.debug("Data Assimilation System initialized: %s" % self.Identifier)
 
-    def load_rc(self, RcFileName):
+    def load_rc(self, rcfilename):
         """ 
         This method loads a DA System Info rc-file with settings for this simulation 
         """
 
-        for k, v in rc.read(RcFileName).iteritems():
+        for k, v in rc.read(rcfilename).iteritems():
             self[k] = v
-        self.RcFileName = RcFileName
-        self.DaRcLoaded = True
-        logging.debug("DA System Info rc-file (%s) loaded successfully" % self.RcFileName)
+
+        logging.debug("DA System Info rc-file (%s) loaded successfully" % rcfilename)
 
 
     def validate(self):
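
For reference outside the patch: load_rc copies every rc-file key straight into the
dict subclass, so settings become available as self[key]. A minimal sketch of the same
pattern, assuming rc-files hold simple "key : value" lines (read_rc below is a toy
stand-in for da.tools.rc.read, whose real syntax may differ; the filename and keys are
hypothetical):

    import logging

    def read_rc(filename):
        # Toy stand-in for da.tools.rc.read: parse "key : value" lines,
        # skipping blanks and "!" comments.
        settings = {}
        for line in open(filename):
            line = line.split('!')[0].strip()
            if not line:
                continue
            key, _, value = line.partition(':')
            settings[key.strip()] = value.strip()
        return settings

    class MiniDaSystem(dict):
        def __init__(self, rcfilename):
            self.ID = 'demo system'    # mirrors the renamed attribute above
            self.load_rc(rcfilename)
            logging.debug("Data Assimilation System initialized: %s" % self.ID)

        def load_rc(self, rcfilename):
            # Copy every setting into the dict itself, as DaSystem.load_rc does.
            for k, v in read_rc(rcfilename).items():
                self[k] = v
            logging.debug("DA System Info rc-file (%s) loaded successfully" % rcfilename)
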
diff --git a/gridded/da/baseclasses/obs.py b/gridded/da/baseclasses/obs.py
index 293424fa5781ba4873428286c1d1498144d8b9fb..a811afe604ce5ca63eb9c1326bbf93bc3f5be90d 100755
--- a/gridded/da/baseclasses/obs.py
+++ b/gridded/da/baseclasses/obs.py
@@ -49,14 +49,14 @@ class Observations(object):
         """
         create an object with an identifier, version, and an empty ObservationList
         """
-        self.Identifier = identifier
-        self.Version = version
+        self.ID = identifier
+        self.version = version
         self.datalist = []  # initialize with an empty list of obs
 
         # The following code allows the object to be initialized with a DaCycle object already present. Otherwise, it can
         # be added at a later moment.
 
-        logging.info('Observations object initialized: %s' % self.Identifier)
+        logging.info('Observations object initialized: %s' % self.ID)
 
     def getlength(self):
         return len(self.datalist)
diff --git a/gridded/da/baseclasses/observationoperator.py b/gridded/da/baseclasses/observationoperator.py
index 2381038730f2606231f8abf989e13ae4cc934d7e..2a20818f857bf1885400665cec55c22a275806ef 100755
--- a/gridded/da/baseclasses/observationoperator.py
+++ b/gridded/da/baseclasses/observationoperator.py
@@ -28,14 +28,14 @@ class ObservationOperator(object):
 
     """
 
-    def __init__(self, RcFileName, DaCycle=None):
+    def __init__(self, rcfilename, DaCycle=None):
         """ The instance of an ObservationOperator is application dependent """
         self.Identifier = identifier
         self.Version = version
         self.RestartFileList = []
         self.outputdir = None # Needed for opening the samples.nc files created 
 
-        self.load_rc(RcFileName)   # load the specified rc-file
+        self.load_rc(rcfilename)   # load the specified rc-file
         self.validate_rc()         # validate the contents
 
         logging.info('Observation Operator object initialized: %s' % self.Identifier)
diff --git a/gridded/da/baseclasses/optimizer.py b/gridded/da/baseclasses/optimizer.py
index 7a69636a5c6d7a19575cef527ccb609af182de89..d213b45e896bc439b4653e0e87d4bd43829f84f4 100755
--- a/gridded/da/baseclasses/optimizer.py
+++ b/gridded/da/baseclasses/optimizer.py
@@ -10,10 +10,7 @@ File created on 28 Jul 2010.
 
 """
 
-import os
-import sys
 import logging
-import datetime
 import numpy as np
 import numpy.linalg as la
 
@@ -33,10 +30,10 @@ class Optimizer(object):
     """
 
     def __init__(self):
-        self.Identifier = identifier
-        self.Version = version
+        self.ID = identifier
+        self.version = version
 
-        logging.info('Optimizer object initialized: %s' % self.Identifier)
+        logging.info('Optimizer object initialized: %s' % self.ID)
 
     def initialize(self, dims):
         self.nlag = dims[0]
@@ -97,24 +94,24 @@ class Optimizer(object):
         allsimulated = None  # collect all members model samples for n=1,..,nlag
 
         for n in range(self.nlag):
-            Samples = StateVector.ObsToAssimmilate[n]
+            samples = StateVector.ObsToAssimmilate[n]
             members = StateVector.EnsembleMembers[n]
             self.x[n * self.nparams:(n + 1) * self.nparams] = members[0].ParameterValues
             self.X_prime[n * self.nparams:(n + 1) * self.nparams, :] = np.transpose(np.array([m.ParameterValues for m in members]))
 
-            if Samples != None:        
-                self.rejection_threshold = Samples.rejection_threshold
+            if samples is not None:
+                self.rejection_threshold = samples.rejection_threshold
 
-                allreject.extend(Samples.getvalues('may_reject'))
-                alllocalize.extend(Samples.getvalues('may_localize'))
-                allflags.extend(Samples.getvalues('flag'))
-                allspecies.extend(Samples.getvalues('species'))
-                allobs.extend(Samples.getvalues('obs'))
-                allsites.extend(Samples.getvalues('code'))
-                allmdm.extend(Samples.getvalues('mdm'))
-                allids.extend(Samples.getvalues('id'))
+                allreject.extend(samples.getvalues('may_reject'))
+                alllocalize.extend(samples.getvalues('may_localize'))
+                allflags.extend(samples.getvalues('flag'))
+                allspecies.extend(samples.getvalues('species'))
+                allobs.extend(samples.getvalues('obs'))
+                allsites.extend(samples.getvalues('code'))
+                allmdm.extend(samples.getvalues('mdm'))
+                allids.extend(samples.getvalues('id'))
 
-                simulatedensemble = Samples.getvalues('simulated')
+                simulatedensemble = samples.getvalues('simulated')
 
                 if allsimulated == None :     
                     allsimulated = np.array(simulatedensemble)
@@ -150,7 +147,7 @@ class Optimizer(object):
 
         logging.debug('Returning optimized data to the StateVector, setting "StateVector.isOptimized = True" ')
 
-    def write_diagnostics(self, filename, type='prior'):
+    def write_diagnostics(self, filename, type):
         """
             Open a NetCDF file and write diagnostic output from optimization process:
 
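
For readers following the x/X_prime assembly a few hunks up: member 0 of each lag
carries the mean state, and every ensemble member contributes one column per lag
block. A self-contained numpy sketch of that stacking (nlag, nparams, nmembers and
the random ensemble are fabricated for illustration):

    import numpy as np

    nlag, nparams, nmembers = 2, 3, 5
    rng = np.random.RandomState(0)
    # members[n][m] stands in for StateVector.EnsembleMembers[n][m].ParameterValues
    members = [[rng.normal(1.0, 0.1, nparams) for m in range(nmembers)]
               for n in range(nlag)]

    x = np.zeros(nlag * nparams)                    # mean state: member 0 per lag
    X_prime = np.zeros((nlag * nparams, nmembers))  # one column per ensemble member

    for n in range(nlag):
        x[n * nparams:(n + 1) * nparams] = members[n][0]
        X_prime[n * nparams:(n + 1) * nparams, :] = np.transpose(np.array(members[n]))

    # Subtracting the mean column-wise yields the deviations the filter works with.
    deviations = X_prime - x[:, np.newaxis]
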
diff --git a/gridded/da/baseclasses/platform.py b/gridded/da/baseclasses/platform.py
index 7d6a0e625e1473fcb4eb4df224a839aaa0f198e1..f8daf9a4be589afb2100fa6dc62d86f6abace19b 100755
--- a/gridded/da/baseclasses/platform.py
+++ b/gridded/da/baseclasses/platform.py
@@ -8,14 +8,14 @@
 Revision History:
 File created on 06 Sep 2010.
 
-The PlatForm class is found in the module :mod:`platform`, or in a specific implementation under the da/source tree. 
+The Platform class is found in the module :mod:`platform`, or in a specific implementation under the da/source tree. 
 
 The platform object holds attributes and methods that allow job control on each specific platform. This includes methods to create and submit jobs, but also to obtain process and/or job ID's. These are needed to control the flow of 
 the system on each platform.
 
-Typically, every platform needs specific implementations of this object (through inheritance), and you should refer to your specific PlatForm object documentation for details (see *da/platform/*).
+Typically, every platform needs specific implementations of this object (through inheritance), and you should refer to your specific Platform object documentation for details (see *da/platform/*).
 
-.. autoclass:: da.baseclasses.platform.PlatForm
+.. autoclass:: da.baseclasses.platform.Platform
    :members:
    :inherited-members:
 
@@ -27,22 +27,22 @@ import subprocess
 
 std_joboptions = {'jobname':'test', 'jobaccount':'co2', 'jobnodes':'nserial 1', 'jobshell':'/bin/sh', 'depends':'', 'jobtime':'01:00:00'}
 
-class PlatForm(object):
+class Platform(object):
     """ 
     This specifies platform dependent options under generic object calls. A platform object is used to control and submit jobs
     """
 
     def __init__(self):
         """
-        The init function reports the hard-coded ``Identifier`` and ``Version`` of the PlatForm. Since each new
-        computer/user requires their own PlatForm object modifications, the init function is usually overwritten
+        The init function reports the hard-coded ``Identifier`` and ``Version`` of the Platform. Since each new
+        computer/user requires their own Platform object modifications, the init function is usually overwritten
         in the specific implementation of this class
         """
-        self.Identifier = 'iPad'    # the identifier gives the plaform name
-        self.Version = '1.0'     # the platform version used
+        self.ID = 'iPad'    # the identifier gives the platform name
+        self.version = '1.0'     # the platform version used
 
-        logging.debug('%s object initialized' % self.Identifier)
-        logging.debug('%s version: %s' % (self.Identifier, self.Version))
+        logging.debug('%s object initialized' % self.ID)
+        logging.debug('%s version: %s' % (self.ID, self.version))
 
     def give_blocking_flag(self):
         return ""
diff --git a/gridded/da/baseclasses/statevector.py b/gridded/da/baseclasses/statevector.py
index fb67596efe3ab3f0d770e5e00487caf65192cc67..276c8793ccef6b6e174acaa97189a85f9c2cedf8 100755
--- a/gridded/da/baseclasses/statevector.py
+++ b/gridded/da/baseclasses/statevector.py
@@ -115,13 +115,13 @@ class StateVector(object):
     """
 
     def __init__(self):
-        self.Identifier = identifier
-        self.Version = version
+        self.ID = identifier
+        self.version = version
 
         # The following code allows the object to be initialized with a DaCycle object already present. Otherwise, it can
         # be added at a later moment.
 
-        logging.info('Statevector object initialized: %s' % self.Identifier)
+        logging.info('Statevector object initialized: %s' % self.ID)
 
     def initialize(self, DaCycle):
         """
@@ -274,7 +274,7 @@ class StateVector(object):
             newmember.ParameterValues = np.dot(C, rands) + newmean
             self.EnsembleMembers[lag].append(newmember)
 
-        logging.debug('%d new ensemble members were added to the state vector # %d' % (self.nmembers, lag + 1))
+        logging.debug('%d new ensemble members were added to the state vector # %d' % (self.nmembers, (lag + 1)))
 
 
     def propagate(self, DaCycle):
@@ -380,7 +380,7 @@ class StateVector(object):
 
         #import da.tools.io as io
         f = io.CT_Read(filename, 'read')
-        MeanState = f.get_variable('statevectormean_' + qual)
+        meanstate = f.get_variable('statevectormean_' + qual)
         EnsembleMembers = f.get_variable('statevectorensemble_' + qual)
         f.close()
 
@@ -390,9 +390,9 @@ class StateVector(object):
                 logging.warning('Existing ensemble for lag=%d was removed to make place for newly read data' % (n + 1))
 
             for m in range(self.nmembers):
-                NewMember = EnsembleMember(m)
-                NewMember.ParameterValues = EnsembleMembers[n, m, :].flatten() + MeanState[n]  # add the mean to the deviations to hold the full parameter values
-                self.EnsembleMembers[n].append(NewMember)
+                newmember = EnsembleMember(m)
+                newmember.ParameterValues = EnsembleMembers[n, m, :].flatten() + meanstate[n]  # add the mean to the deviations to hold the full parameter values
+                self.EnsembleMembers[n].append(newmember)
 
         logging.info('Successfully read the State Vector from file (%s) ' % filename)
 
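
On file the ensemble is stored as deviations around a per-lag mean, so the read path
above adds the mean back onto every member. The same reconstruction in isolation
(shapes and random data are fabricated; the names follow the statevectormean_ and
statevectorensemble_ variables):

    import numpy as np

    nlag, nmembers, nparams = 2, 4, 3
    rng = np.random.RandomState(1)
    meanstate = rng.normal(1.0, 0.05, (nlag, nparams))           # statevectormean_<qual>
    ensemble = rng.normal(0.0, 0.10, (nlag, nmembers, nparams))  # stored deviations

    # Rebuild full parameter values: each stored deviation plus that lag's mean.
    full_values = [[ensemble[n, m, :].flatten() + meanstate[n]
                    for m in range(nmembers)]
                   for n in range(nlag)]
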
diff --git a/gridded/da/carbondioxide/dasystem.py b/gridded/da/carbondioxide/dasystem.py
index daf9444edbe8ed88850bcc895542ae547ecaafc0..2f95c2caa6eba2d4e80c4d913f4d81a409db78db 100755
--- a/gridded/da/carbondioxide/dasystem.py
+++ b/gridded/da/carbondioxide/dasystem.py
@@ -35,8 +35,10 @@ class CO2DaSystem(DaSystem):
 
 
         for k, v in self.iteritems():
-            if v == 'True' : self[k] = True
-            if v == 'False': self[k] = False
+            if v == 'True':
+                self[k] = True
+            if v == 'False':
+                self[k] = False
 
         for key in needed_rc_items:
             if not self.has_key(key):
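
Because rc.read returns every value as a string, validate() first maps the literals
'True' and 'False' onto real booleans, then checks that the required keys exist. The
coercion in isolation (the keys are invented):

    settings = {'obs.rejection.threshold': '3',
                'do.sample': 'True',
                'do.plot': 'False'}

    for k, v in settings.items():
        if v == 'True':
            settings[k] = True
        if v == 'False':
            settings[k] = False

    assert settings['do.sample'] is True and settings['do.plot'] is False
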
diff --git a/gridded/da/carbondioxide/obs.py b/gridded/da/carbondioxide/obs.py
index 5c3ff2772d44a98b46ea1a7472e6e475b6c41a2f..8696a99fb0a590c062920999670490f7a036c910 100755
--- a/gridded/da/carbondioxide/obs.py
+++ b/gridded/da/carbondioxide/obs.py
@@ -255,15 +255,15 @@ class CO2Observations(Observations):
             logging.error(msg)
             raise IOError, msg
         else:
-            self.SitesFile = filename
+            self.sites_file = filename
 
-        SitesWeights = rc.read(self.SitesFile)
+        sites_weights = rc.read(self.sites_file)
 
-        self.rejection_threshold = int(SitesWeights['obs.rejection.threshold'])
-        self.global_R_scaling = float(SitesWeights['global.R.scaling'])
-        self.n_site_categories = int(SitesWeights['n.site.categories'])
-        self.n_sites_active = int(SitesWeights['n.sites.active'])
-        self.n_sites_moved = int(SitesWeights['n.sites.moved'])
+        self.rejection_threshold = int(sites_weights['obs.rejection.threshold'])
+        self.global_R_scaling = float(sites_weights['global.R.scaling'])
+        self.n_site_categories = int(sites_weights['n.site.categories'])
+        self.n_sites_active = int(sites_weights['n.sites.active'])
+        self.n_sites_moved = int(sites_weights['n.sites.moved'])
 
         logging.debug('Model-data mismatch rejection threshold: %d ' % self.rejection_threshold)
         logging.debug('Model-data mismatch scaling factor     : %f ' % self.global_R_scaling)
@@ -271,11 +271,11 @@ class CO2Observations(Observations):
         logging.debug('Model-data mismatch active sites       : %d ' % self.n_sites_active)
         logging.debug('Model-data mismatch moved sites        : %d ' % self.n_sites_moved)
    
-        cats = [k for k in SitesWeights.keys() if 'site.category' in k] 
+        cats = [k for k in sites_weights.keys() if 'site.category' in k] 
 
         SiteCategories = {}
         for key in cats:
-            name, error, may_localize, may_reject = SitesWeights[key].split(';')
+            name, error, may_localize, may_reject = sites_weights[key].split(';')
             name = name.strip().lower()
             error = float(error)
             may_reject = ("TRUE" in may_reject.upper())
@@ -284,30 +284,30 @@ class CO2Observations(Observations):
             #print name,SiteCategories[name]
 
 
-        active = [k for k in SitesWeights.keys() if 'site.active' in k] 
+        active = [k for k in sites_weights.keys() if 'site.active' in k] 
 
-        SiteInfo = {}
+        site_info = {}
         for key in active:
-            sitename, sitecategory = SitesWeights[key].split(';')
+            sitename, sitecategory = sites_weights[key].split(';')
             sitename = sitename.strip().lower()
             sitecategory = sitecategory.strip().lower()
-            SiteInfo[sitename] = SiteCategories[sitecategory]
-            #print sitename,SiteInfo[sitename]
+            site_info[sitename] = SiteCategories[sitecategory]
+            #print sitename,site_info[sitename]
 
         for obs in self.datalist:
             obs.mdm = 1000.0  # default is very high model-data-mismatch, until explicitly set by script
-            if SiteInfo.has_key(obs.code): 
+            if site_info.has_key(obs.code): 
                 logging.debug("Observation found (%s)" % obs.code)
-                obs.mdm = SiteInfo[obs.code]['error'] * self.global_R_scaling
-                obs.may_localize = SiteInfo[obs.code]['may_localize']
-                obs.may_reject = SiteInfo[obs.code]['may_reject']
+                obs.mdm = site_info[obs.code]['error'] * self.global_R_scaling
+                obs.may_localize = site_info[obs.code]['may_localize']
+                obs.may_reject = site_info[obs.code]['may_reject']
             else:
-                logging.warning("Observation NOT found (%s, %s), please check sites.rc file  (%s)  !!!" % (obs.code, identifier, self.SitesFile))
+                logging.warning("Observation NOT found (%s, %s), please check sites.rc file  (%s)  !!!" % (obs.code, identifier, self.sites_file))
                 obs.flag = 99
 
-            # Add SiteInfo dictionary to the Observations object for future use
+            # Add site_info dictionary to the Observations object for future use
 
-            self.SiteInfo = SiteInfo
+            self.site_info = site_info
 
     def write_obs_to_file(self, outfile):
         """ 
diff --git a/gridded/da/carbondioxide/obspack.py b/gridded/da/carbondioxide/obspack.py
index a0920dd1c2b2713c78a2c5e250e25b2b71f4b642..40b180dbbb55777116837719739fbbbce521f771 100755
--- a/gridded/da/carbondioxide/obspack.py
+++ b/gridded/da/carbondioxide/obspack.py
@@ -46,8 +46,8 @@ class ObsPackObservations(Observations):
             logging.error(msg)
             raise IOError, msg
         else:
-            self.ObsPackDir = op_dir
-            self.ObsPackId = op_id
+            self.obspack_dir = op_dir
+            self.obspack_id = op_id
 
         self.datalist = []
 
@@ -62,7 +62,7 @@ class ObsPackObservations(Observations):
 
         # Step 1: Read list of available site files in package
 
-        infile = os.path.join(self.ObsPackDir, 'summary', '%s_dataset_summary.txt' % (self.ObsPackId,))
+        infile = os.path.join(self.obspack_dir, 'summary', '%s_dataset_summary.txt' % (self.obspack_id,))
         f = open(infile, 'r')
         lines = f.readlines()
         f.close()
@@ -79,7 +79,7 @@ class ObsPackObservations(Observations):
 
         for ncfile in ncfilelist:
 
-            infile = os.path.join(self.ObsPackDir, 'data', 'nc', ncfile)
+            infile = os.path.join(self.obspack_dir, 'data', 'nc', ncfile)
             ncf = io.CT_Read(infile, 'read')
             idates = ncf.get_variable('time_components')
             dates = array([dtm.datetime(*d) for d in idates])
@@ -267,15 +267,15 @@ class ObsPackObservations(Observations):
             logging.error(msg)
             raise IOError, msg
         else:
-            self.SitesFile = filename
+            self.sites_file = filename
 
-        SitesWeights = rc.read(self.SitesFile)
+        sites_weights = rc.read(self.sites_file)
 
-        self.rejection_threshold = int(SitesWeights['obs.rejection.threshold'])
-        self.global_R_scaling = float(SitesWeights['global.R.scaling'])
-        self.n_site_categories = int(SitesWeights['n.site.categories'])
-        self.n_sites_active = int(SitesWeights['n.sites.active'])
-        self.n_sites_moved = int(SitesWeights['n.sites.moved'])
+        self.rejection_threshold = int(sites_weights['obs.rejection.threshold'])
+        self.global_R_scaling = float(sites_weights['global.R.scaling'])
+        self.n_site_categories = int(sites_weights['n.site.categories'])
+        self.n_sites_active = int(sites_weights['n.sites.active'])
+        self.n_sites_moved = int(sites_weights['n.sites.moved'])
 
         logging.debug('Model-data mismatch rejection threshold: %d ' % (self.rejection_threshold))
         logging.warning('Model-data mismatch scaling factor     : %f ' % (self.global_R_scaling))
@@ -283,28 +283,28 @@ class ObsPackObservations(Observations):
         logging.debug('Model-data mismatch active sites       : %d ' % (self.n_sites_active))
         logging.debug('Model-data mismatch moved sites        : %d ' % (self.n_sites_moved))
    
-        cats = [k for k in SitesWeights.keys() if 'site.category' in k] 
+        cats = [k for k in sites_weights.keys() if 'site.category' in k] 
 
-        SiteCategories = {}
+        site_categories = {}
         for key in cats:
-            name, error, may_localize, may_reject = SitesWeights[key].split(';')
+            name, error, may_localize, may_reject = sites_weights[key].split(';')
             name = name.strip().lower()
             error = float(error)
             may_localize = ("TRUE" in may_localize.upper())
             may_reject = ("TRUE" in may_reject.upper())
-            SiteCategories[name] = {'category':name, 'error':error, 'may_localize':may_localize, 'may_reject':may_reject}
-            #print name,SiteCategories[name]
+            site_categories[name] = {'category':name, 'error':error, 'may_localize':may_localize, 'may_reject':may_reject}
+            #print name,site_categories[name]
 
 
-        active = [k for k in SitesWeights.keys() if 'site.active' in k] 
+        active = [k for k in sites_weights.keys() if 'site.active' in k] 
 
-        SiteInfo = {}
+        site_info = {}
         for key in active:
-            sitename, sitecategory = SitesWeights[key].split(';')
+            sitename, sitecategory = sites_weights[key].split(';')
             sitename = sitename.strip()
             sitecategory = sitecategory.strip().lower()
-            SiteInfo[sitename] = SiteCategories[sitecategory]
-            #print sitename,SiteInfo[sitename]
+            site_info[sitename] = site_categories[sitecategory]
+            #print sitename,site_info[sitename]
 
         for obs in self.datalist:
 
@@ -317,22 +317,22 @@ class ObsPackObservations(Observations):
 
             identifier = name_convert(name="%s_%s_%s" % (site.lower(), method.lower(), lab.lower(),), to='GV')
 
-            if SiteInfo.has_key(identifier): 
+            if site_info.has_key(identifier): 
                 logging.debug("Observation found (%s, %s)" % (obs.code, identifier))
-                obs.mdm = SiteInfo[identifier]['error'] * self.global_R_scaling
-                obs.may_localize = SiteInfo[identifier]['may_localize']
-                obs.may_reject = SiteInfo[identifier]['may_reject']
+                obs.mdm = site_info[identifier]['error'] * self.global_R_scaling
+                obs.may_localize = site_info[identifier]['may_localize']
+                obs.may_reject = site_info[identifier]['may_reject']
                 obs.flag = 0
             else:
-                logging.warning("Observation NOT found (%s, %s), please check sites.rc file  (%s)  !!!" % (obs.code, identifier, self.SitesFile))
+                logging.warning("Observation NOT found (%s, %s), please check sites.rc file  (%s)  !!!" % (obs.code, identifier, self.sites_file))
 
-            if SiteInfo[identifier]['category'] == 'do-not-use':
+            if site_info[identifier]['category'] == 'do-not-use':
                 logging.warning("Observation found (%s, %s), but not used in assimilation !!!" % (obs.code, identifier))
                 obs.flag = 99
 
-            # Add SiteInfo dictionary to the Observations object for future use
+            # Add site_info dictionary to the Observations object for future use
 
-            self.SiteInfo = SiteInfo
+            self.site_info = site_info
 
     def write_obs_to_file(self, outfile):
         """ 
diff --git a/gridded/da/carbondioxide/obspack_geocarbon.py b/gridded/da/carbondioxide/obspack_geocarbon.py
index 3a7bfa22a60da7783b1e825fb185415d4ad094e8..0243384a5264be2376111545cef53d6be5d9900f 100755
--- a/gridded/da/carbondioxide/obspack_geocarbon.py
+++ b/gridded/da/carbondioxide/obspack_geocarbon.py
@@ -42,8 +42,8 @@ class ObsPackObservations(Observations):
             logging.error(msg)
             raise IOError, msg
         else:
-            self.ObsPackDir = op_dir
-            self.ObsPackId = op_id
+            self.obspack_dir = op_dir
+            self.obspack_id = op_id
 
         self.datalist = []
 
@@ -57,7 +57,7 @@ class ObsPackObservations(Observations):
 
         # Step 1: Read list of available site files in package
 
-        infile = os.path.join(self.ObsPackDir, 'summary', '%s_dataset_summary.txt' % (self.ObsPackId,))
+        infile = os.path.join(self.obspack_dir, 'summary', '%s_dataset_summary.txt' % (self.obspack_id,))
         f = open(infile, 'r')
         lines = f.readlines()
         f.close()
@@ -77,7 +77,7 @@ class ObsPackObservations(Observations):
 
         for ncfile in ncfilelist:
 
-            infile = os.path.join(self.ObsPackDir, 'data', 'nc', ncfile + '.nc')
+            infile = os.path.join(self.obspack_dir, 'data', 'nc', ncfile + '.nc')
             ncf = io.CT_Read(infile, 'read')
             idates = ncf.get_variable('time_components')
             dates = array([dtm.datetime(*d) for d in idates])
@@ -162,7 +162,7 @@ class ObsPackObservations(Observations):
             f.close()
             #return obsinputfile
 
-        for key, value in self.SiteMove.iteritems():
+        for key, value in self.site_move.iteritems():
             msg = "Site is moved by %3.2f degrees latitude and %3.2f degrees longitude" % value 
             f.AddAttribute(key, msg)
 
@@ -266,44 +266,44 @@ class ObsPackObservations(Observations):
             logging.error(msg)
             raise IOError, msg
         else:
-            self.SitesFile = filename
+            self.sites_file = filename
 
-        SitesWeights = rc.read(self.SitesFile)
+        sites_weights = rc.read(self.sites_file)
 
-        self.rejection_threshold = int(SitesWeights['obs.rejection.threshold'])
-        self.global_R_scaling = float(SitesWeights['global.R.scaling'])
-        self.n_site_categories = int(SitesWeights['n.site.categories'])
+        self.rejection_threshold = int(sites_weights['obs.rejection.threshold'])
+        self.global_R_scaling = float(sites_weights['global.R.scaling'])
+        self.n_site_categories = int(sites_weights['n.site.categories'])
 
         logging.debug('Model-data mismatch rejection threshold: %d ' % self.rejection_threshold)
         logging.warning('Model-data mismatch scaling factor     : %f ' % self.global_R_scaling)
         logging.debug('Model-data mismatch site categories    : %d ' % self.n_site_categories)
    
-        cats = [k for k in SitesWeights.keys() if 'site.category' in k] 
+        cats = [k for k in sites_weights.keys() if 'site.category' in k] 
 
-        SiteCategories = {}
+        site_categories = {}
         for key in cats:
-            name, error, may_localize, may_reject = SitesWeights[key].split(';')
+            name, error, may_localize, may_reject = sites_weights[key].split(';')
             name = name.strip().lower()
             error = float(error)
             may_reject = ("TRUE" in may_reject.upper())
             may_localize = ("TRUE" in may_localize.upper())
-            SiteCategories[name] = {'category': name, 'error': error, 'may_localize': may_localize, 'may_reject': may_reject}
+            site_categories[name] = {'category': name, 'error': error, 'may_localize': may_localize, 'may_reject': may_reject}
 
-        SiteInfo = {}
-        SiteMove = {}
-        SiteHourly = {}   # option added to include only certain hours of the day (for e.g. PAL) IvdL
-        for key, value in SitesWeights.iteritems():
+        site_info = {}
+        site_move = {}
+        site_hourly = {}   # option to include only certain hours of the day (e.g. for PAL) IvdL
+        for key, value in sites_weights.iteritems():
             if 'co2_' in key or 'sf6' in key:  # to be fixed later, do not yet know how to parse valid keys from rc-files yet.... WP
                 sitename, sitecategory = key, value
                 sitename = sitename.strip()
                 sitecategory = sitecategory.split()[0].strip().lower()
-                SiteInfo[sitename] = SiteCategories[sitecategory]
+                site_info[sitename] = site_categories[sitecategory]
             if 'site.move' in key:
                 identifier, latmove, lonmove = value.split(';')
-                SiteMove[identifier.strip()] = (float(latmove), float(lonmove))
+                site_move[identifier.strip()] = (float(latmove), float(lonmove))
             if 'site.hourly' in key:
                 identifier, hourfrom, hourto = value.split(';')
-                SiteHourly[identifier.strip()] = (int(hourfrom), int(hourto))
+                site_hourly[identifier.strip()] = (int(hourfrom), int(hourto))
 
         for obs in self.datalist:  # loop over all available data points
 
@@ -314,43 +314,43 @@ class ObsPackObservations(Observations):
             identifier = obs.code
             species, site, method, lab, datasetnr = identifier.split('_')
 
-            if SiteInfo.has_key(identifier):
-                if SiteHourly.has_key(identifier):
-                    hourf, hourt = SiteHourly[identifier]
+            if site_info.has_key(identifier):
+                if site_hourly.has_key(identifier):
+                    hourf, hourt = site_hourly[identifier]
                     if int(obs.xdate.hour) >= hourf and int(obs.xdate.hour) <= hourt:
                         logging.warning("Observations in hourly dataset INCLUDED, while sampling time %s was between %s:00-%s:00"%(obs.xdate.time(),hourf,hourt))
                     else:
                         logging.warning("Observation in hourly dataset EXCLUDED, while sampling time %s was outside %s:00-%s:00"%(obs.xdate.time(),hourf,hourt))
                         exclude_hourly = True
-                if SiteInfo[identifier]['category'] == 'do-not-use' or exclude_hourly:
+                if site_info[identifier]['category'] == 'do-not-use' or exclude_hourly:
                     logging.warning("Observation found (%s, %d), but not used in assimilation !!!" % (identifier, obs.id))
-                    obs.mdm = SiteInfo[identifier]['error'] * self.global_R_scaling
-                    obs.may_localize = SiteInfo[identifier]['may_localize']
-                    obs.may_reject = SiteInfo[identifier]['may_reject']
+                    obs.mdm = site_info[identifier]['error'] * self.global_R_scaling
+                    obs.may_localize = site_info[identifier]['may_localize']
+                    obs.may_reject = site_info[identifier]['may_reject']
                     obs.flag = 99
                 else:
                     logging.debug("Observation found (%s, %d)" % (identifier, obs.id))
-                    obs.mdm = SiteInfo[identifier]['error'] * self.global_R_scaling
-                    obs.may_localize = SiteInfo[identifier]['may_localize']
-                    obs.may_reject = SiteInfo[identifier]['may_reject']
+                    obs.mdm = site_info[identifier]['error'] * self.global_R_scaling
+                    obs.may_localize = site_info[identifier]['may_localize']
+                    obs.may_reject = site_info[identifier]['may_reject']
                     obs.flag = 0
 
             else:
-                logging.warning("Observation NOT found (%s, %d), please check sites.rc file (%s)  !!!" % (identifier, obs.id, self.SitesFile))
+                logging.warning("Observation NOT found (%s, %d), please check sites.rc file (%s)  !!!" % (identifier, obs.id, self.sites_file))
 
-            if SiteMove.has_key(identifier):
+            if site_move.has_key(identifier):
 
-                movelat, movelon = SiteMove[identifier]
+                movelat, movelon = site_move[identifier]
                 obs.lat = obs.lat + movelat
                 obs.lon = obs.lon + movelon
 
                 logging.warning("Observation location for (%s, %d), is moved by %3.2f degrees latitude and %3.2f degrees longitude" % (identifier, obs.id, movelat, movelon))
 
-        # Add SiteInfo dictionary to the Observations object for future use
+        # Add site_info dictionary to the Observations object for future use
 
-        self.SiteInfo = SiteInfo
-        self.SiteMove = SiteMove
-        self.SiteHourly = SiteHourly
+        self.site_info = site_info
+        self.site_move = site_move
+        self.site_hourly = site_hourly
 
         logging.debug("Added Model Data Mismatch to all samples ")
 
@@ -372,7 +372,7 @@ class ObsPackObservations(Observations):
             f.close()
             #return outfile
 
-        for key, value in self.SiteMove.iteritems():
+        for key, value in self.site_move.iteritems():
             msg = "Site is moved by %3.2f degrees latitude and %3.2f degrees longitude" % value 
             f.AddAttribute(key, msg)
 
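
The site.move and site.hourly entries follow the same semicolon convention: an
identifier plus a latitude/longitude offset, or an identifier plus an inclusive hour
window. The parsing in isolation (identifiers and numbers are invented):

    sites_weights = {
        'site.move.001': 'co2_pal_surface-insitu_30_marine; 0.50; -1.25',
        'site.hourly.001': 'co2_pal_surface-insitu_30_marine; 10; 16',
    }

    site_move, site_hourly = {}, {}
    for key, value in sites_weights.items():
        if 'site.move' in key:
            identifier, latmove, lonmove = value.split(';')
            site_move[identifier.strip()] = (float(latmove), float(lonmove))
        if 'site.hourly' in key:
            identifier, hourfrom, hourto = value.split(';')
            site_hourly[identifier.strip()] = (int(hourfrom), int(hourto))
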
diff --git a/gridded/da/carbondioxide/statevector.py b/gridded/da/carbondioxide/statevector.py
index e7416eb4cffa7d57048a9c15435aeab9f9df3120..57eb6c138f5ca3399d839c0a0c5a615db60ab99e 100755
--- a/gridded/da/carbondioxide/statevector.py
+++ b/gridded/da/carbondioxide/statevector.py
@@ -114,13 +114,12 @@ class CO2StateVector(StateVector):
         f = io.CT_Read(filename, 'read')
 
         for n in range(self.nlag):
-
             if qual == 'opt':
-                MeanState = f.get_variable('xac_%02d' % (n + 1))
+                meanstate = f.get_variable('xac_%02d' % (n + 1))
                 EnsembleMembers = f.get_variable('adX_%02d' % (n + 1))
 
             elif qual == 'prior':
-                MeanState = f.get_variable('xpc_%02d' % (n + 1))
+                meanstate = f.get_variable('xpc_%02d' % (n + 1))
                 EnsembleMembers = f.get_variable('pdX_%02d' % (n + 1))
 
             if not self.EnsembleMembers[n] == []:
@@ -128,9 +127,9 @@ class CO2StateVector(StateVector):
                 logging.warning('Existing ensemble for lag=%d was removed to make place for newly read data' % (n + 1))
 
             for m in range(self.nmembers):
-                NewMember = EnsembleMember(m)
-                NewMember.ParameterValues = EnsembleMembers[ m, :].flatten() + MeanState  # add the mean to the deviations to hold the full parameter values
-                self.EnsembleMembers[n].append(NewMember)
+                newmember = EnsembleMember(m)
+                newmember.ParameterValues = EnsembleMembers[m, :].flatten() + meanstate  # add the mean to the deviations to hold the full parameter values
+                self.EnsembleMembers[n].append(newmember)
 
         f.close()
 
diff --git a/gridded/da/co2gridded/dasystem.py b/gridded/da/co2gridded/dasystem.py
index a21328efc77a6fd3e69ffd5fa56e4c68b4fd5f6c..25018ea98cfdd27bdbab4fdec94cf88eac5dd2fe 100755
--- a/gridded/da/co2gridded/dasystem.py
+++ b/gridded/da/co2gridded/dasystem.py
@@ -26,10 +26,10 @@ class CO2GriddedDaSystem(DaSystem):
         to the dictionary
         """
 
-        self.Identifier = 'CarbonTracker Gridded CO2'    # the identifier gives the platform name
+        self.ID = 'CarbonTracker Gridded CO2'    # the identifier gives the DA system name
         self.load_rc(rcfilename)
 
-        logging.debug('Data Assimilation System initialized: %s' % self.Identifier)
+        logging.debug('Data Assimilation System initialized: %s' % self.ID)
 
     def validate(self):
         """ 
diff --git a/gridded/da/co2gridded/statevector.py b/gridded/da/co2gridded/statevector.py
index 196e474541693194bdd1bb374e461dd2dd5ba629..7c24f099f8d80bf4899b65623ff8c10d1da7344e 100755
--- a/gridded/da/co2gridded/statevector.py
+++ b/gridded/da/co2gridded/statevector.py
@@ -11,14 +11,14 @@ File created on 28 Jul 2010.
 
 import os
 import sys
+import logging
+import numpy as np
+
 sys.path.append(os.getcwd())
 sys.path.append('../../')
 
-
-import logging
+import da.tools.io4 as io
 from da.baseclasses.statevector import StateVector, EnsembleMember
-import numpy as np
-
 identifier = 'CarbonTracker Gridded Statevector '
 version = '0.0'
 
@@ -33,7 +33,7 @@ class CO2GriddedStateVector(StateVector):
             The argument is thus referring to the lagged state vector as [1,2,3,4,5,..., nlag]
         """    
 
-        import da.tools.io4 as io
+
         try:
             import matplotlib.pyplot as plt
         except:
@@ -128,8 +128,6 @@ class CO2GriddedStateVector(StateVector):
         randstate = np.random.get_state()
 
         for matrix in covariancematrixlist:
-
-
             # Make a cholesky decomposition of the covariance matrix
 
             _, s, _ = np.linalg.svd(matrix)
@@ -168,29 +166,29 @@ class CO2GriddedStateVector(StateVector):
 
         # Create mean values 
 
-        NewMean = np.ones(self.nparams, float) # standard value for a new time step is 1.0
+        new_mean = np.ones(self.nparams, float) # standard value for a new time step is 1.0
 
         # If this is not the start of the filter, average previous two optimized steps into the mix
 
-        if lag == self.nlag and self.nlag >= 3:
-            NewMean += self.EnsembleMembers[lag - 2][0].ParameterValues + \
-                                           self.EnsembleMembers[lag - 3][0].ParameterValues 
-            NewMean = NewMean / 3.0
+        if lag == self.nlag - 1 and self.nlag >= 3:
+            new_mean += self.EnsembleMembers[lag - 1][0].ParameterValues + \
+                                           self.EnsembleMembers[lag - 2][0].ParameterValues 
+            new_mean = new_mean / 3.0
 
         # Create the first ensemble member with a deviation of 0.0 and add to list
 
-        NewMember = EnsembleMember(0)
-        NewMember.ParameterValues = NewMean.flatten()  # no deviations
-        self.EnsembleMembers[lag - 1].append(NewMember)
+        new_member = EnsembleMember(0)
+        new_member.ParameterValues = new_mean.flatten()  # no deviations
+        self.EnsembleMembers[lag].append(new_member)
 
         # Create members 1:nmembers and add to EnsembleMembers list
 
         for member in range(1, self.nmembers):
-            NewMember = EnsembleMember(member)
-            NewMember.ParameterValues = dev_matrix[:, member - 1] + NewMean
-            self.EnsembleMembers[lag - 1].append(NewMember)
+            new_member = EnsembleMember(member)
+            new_member.ParameterValues = dev_matrix[:, member - 1] + new_mean
+            self.EnsembleMembers[lag].append(new_member)
 
-        logging.debug('%d new ensemble members were added to the state vector # %d' % (self.nmembers, lag))
+        logging.debug('%d new ensemble members were added to the state vector # %d' % (self.nmembers, (lag + 1)))
 
 
 
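
The corrected test lag == self.nlag - 1 treats lag as a zero-based index, so only the
newest lag blends the standard prior of 1.0 with the two most recent optimized means.
A sketch of just that arithmetic (lag_means is fabricated):

    import numpy as np

    nlag, nparams = 5, 4
    # Stand-ins for self.EnsembleMembers[n][0].ParameterValues after optimization.
    lag_means = [np.full(nparams, 1.0 + 0.1 * n) for n in range(nlag)]

    lag = nlag - 1                        # zero-based index of the newly added lag
    new_mean = np.ones(nparams, float)    # standard value for a new time step is 1.0
    if lag == nlag - 1 and nlag >= 3:
        # Average the prior of 1.0 with the two most recent optimized steps.
        new_mean += lag_means[lag - 1] + lag_means[lag - 2]
        new_mean = new_mean / 3.0
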
diff --git a/gridded/da/examples/das.py b/gridded/da/examples/das.py
index 2b58d7bc608a58b4eeaaf2ba89a053d9db1bab9b..250074dbacbc6c1f964a18b60f6fe607e044dd7e 100755
--- a/gridded/da/examples/das.py
+++ b/gridded/da/examples/das.py
@@ -14,15 +14,14 @@ sys.path.append(os.getcwd())
 #################################################################################################
 
 from da.tools.initexit import start_logger, validate_opts_args, parse_options, CycleControl
-from da.tools.pipeline import ensemble_smoother_pipeline
-from da.platform.maunaloa import MaunaloaPlatForm 
+from da.tools.pipeline import ensemble_smoother_pipeline, header, footer
+from da.platform.maunaloa import MaunaloaPlatform 
 from da.carbondioxide.dasystem import CO2DaSystem 
 from da.carbondioxide.statevector import CO2StateVector 
 #from da.carbondioxide.obspack import ObsPackObservations 
 from da.carbondioxide.obs import CO2Observations 
-from da.tm5.observationoperator import TM5ObservationOperator 
 from da.carbondioxide.optimizer import CO2Optimizer
-
+from da.tm5.observationoperator import TM5ObservationOperator
 from da.analysis.expand_fluxes import save_weekly_avg_1x1_data, save_weekly_avg_state_data, save_weekly_avg_tc_data, save_weekly_avg_ext_tc_data
 from da.analysis.expand_mixingratios import write_mixing_ratios
 
@@ -42,7 +41,7 @@ opts, args = validate_opts_args(opts, args)
 DaCycle = CycleControl(opts, args)
 
 
-PlatForm = MaunaloaPlatForm()
+Platform = MaunaloaPlatform()
 DaSystem = CO2DaSystem(DaCycle['da.system.rc'])
 ObsOperator = TM5ObservationOperator(DaCycle['da.obsoperator.rc'])
 #Samples     = ObsPackObservations()
@@ -54,11 +53,10 @@ Optimizer = CO2Optimizer()
 ################### ENTER THE PIPELINE WITH THE OBJECTS PASSED BY THE USER ###############
 ##########################################################################################
 
-from da.tools.pipeline import header, footer
 
 logging.info(header + "Entering Pipeline " + footer) 
 
-ensemble_smoother_pipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector, ObsOperator, Optimizer)
+ensemble_smoother_pipeline(DaCycle, Platform, DaSystem, Samples, StateVector, ObsOperator, Optimizer)
 
 
 ##########################################################################################
diff --git a/gridded/da/examples/dasgridded.py b/gridded/da/examples/dasgridded.py
index c22a95c388ae2c37d6b60c9cbc704561191d6ea1..8ad4cdd857883c0af6d75f4e308b7a3503768ff2 100755
--- a/gridded/da/examples/dasgridded.py
+++ b/gridded/da/examples/dasgridded.py
@@ -16,15 +16,13 @@ sys.path.append(os.getcwd())
 
 from da.tools.initexit import start_logger, parse_options, validate_opts_args, CycleControl
 
-from da.tools.pipeline import ensemble_smoother_pipeline
-from da.platform.maunaloa import MaunaloaPlatForm 
+from da.tools.pipeline import ensemble_smoother_pipeline, header, footer
+from da.platform.maunaloa import MaunaloaPlatform 
 from da.co2gridded.dasystem import CO2GriddedDaSystem 
 from da.co2gridded.statevector import CO2GriddedStateVector 
 from da.carbondioxide.obs import CO2Observations 
-
-from da.tm5.observationoperator import TM5ObservationOperator 
 from da.carbondioxide.optimizer import CO2Optimizer
-from da.tools.pipeline import header, footer
+from da.tm5.observationoperator import TM5ObservationOperator 
 
 from da.analysis.expand_fluxes import save_weekly_avg_1x1_data, save_weekly_avg_state_data, save_weekly_avg_tc_data, save_weekly_avg_ext_tc_data
 from da.analysis.expand_mixingratios import write_mixing_ratios
@@ -43,7 +41,7 @@ opts, args = validate_opts_args(parse_options())
 
 DaCycle = CycleControl(opts, args)
 
-PlatForm = MaunaloaPlatForm()
+Platform = MaunaloaPlatform()
 DaSystem = CO2GriddedDaSystem(DaCycle['da.system.rc'])
 ObsOperator = TM5ObservationOperator(DaCycle['da.obsoperator.rc'])
 Samples = CO2Observations()
@@ -55,7 +53,7 @@ Optimizer = CO2Optimizer()
 ##########################################################################################
 
 logging.info(header + "Entering Pipeline " + footer) 
-ensemble_smoother_pipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector, ObsOperator, Optimizer)
+ensemble_smoother_pipeline(DaCycle, Platform, DaSystem, Samples, StateVector, ObsOperator, Optimizer)
 
 
 ##########################################################################################
diff --git a/gridded/da/examples/dasjet.py b/gridded/da/examples/dasjet.py
index 6f0d28a49bf4434d9ab5eb42bf0916b0f8943bcd..f5ad9b9fc1a362120470f9ada1b6b8f8649d5af3 100755
--- a/gridded/da/examples/dasjet.py
+++ b/gridded/da/examples/dasjet.py
@@ -14,14 +14,13 @@ sys.path.append(os.getcwd())
 #################################################################################################
 
 from da.tools.initexit import start_logger, validate_opts_args, parse_options, CycleControl
-from da.tools.pipeline import ensemble_smoother_pipeline
-from da.platform.jet import JetPlatForm 
+from da.tools.pipeline import ensemble_smoother_pipeline, header, footer
+from da.platform.jet import JetPlatform 
 from da.carbondioxide.dasystem import CO2DaSystem 
 from da.carbondioxide.statevector import CO2StateVector 
 from da.carbondioxide.obs import CO2Observations 
 from da.tm5.observationoperator import TM5ObservationOperator 
 from da.carbondioxide.optimizer import CO2Optimizer
-from da.tools.pipeline import header, footer
 
 
 #################################################################################################
@@ -37,7 +36,7 @@ opts, args = validate_opts_args(parse_options())
 
 DaCycle = CycleControl(opts, args)
 
-PlatForm = JetPlatForm()
+Platform = JetPlatform()
 DaSystem = CO2DaSystem(DaCycle['da.system.rc'])
 ObsOperator = TM5ObservationOperator(DaCycle['da.obsoperator.rc'])
 Samples = CO2Observations()
@@ -51,7 +50,7 @@ Optimizer = CO2Optimizer()
 
 logging.info(header + "Entering Pipeline " + footer) 
 
-ensemble_smoother_pipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector, ObsOperator, Optimizer)
+ensemble_smoother_pipeline(DaCycle, Platform, DaSystem, Samples, StateVector, ObsOperator, Optimizer)
 
 
 ##########################################################################################
diff --git a/gridded/da/platform/capegrim.py b/gridded/da/platform/capegrim.py
index e9f29b0d7ac8f357c13be0edc374d9f148a1ec40..5e6e23753f22f1259b92725a24909b13f115cdb1 100755
--- a/gridded/da/platform/capegrim.py
+++ b/gridded/da/platform/capegrim.py
@@ -11,13 +11,13 @@ File created on 06 Sep 2010.
 
 import logging
 
-from da.baseclasses.platform import PlatForm, std_joboptions
+from da.baseclasses.platform import Platform, std_joboptions
 
-class CapeGrimPlatForm(PlatForm):
+class CapeGrimPlatform(Platform):
     def __init__(self):
 
-        self.Identifier = 'WU capegrim'    # the identifier gives the platform name
-        self.Version = '1.0'     # the platform version used
+        self.ID = 'WU capegrim'    # the identifier gives the platform name
+        self.version = '1.0'     # the platform version used
 
     def give_blocking_flag(self):
         return ""
diff --git a/gridded/da/platform/huygens.py b/gridded/da/platform/huygens.py
index 1961886bb6af9a3ea24b88c8a4c72db2fe3078a0..47727b6b1662dfbae3b41ee8b6d18fe87918e66b 100755
--- a/gridded/da/platform/huygens.py
+++ b/gridded/da/platform/huygens.py
@@ -12,15 +12,15 @@ File created on 06 Sep 2010.
 import logging
 import subprocess
 
-from da.baseclasses.platform import PlatForm
+from da.baseclasses.platform import Platform
 
 std_joboptions = {'jobname':'test', 'jobaccount':'co2', 'jobtype':'serial', 'jobshell':'/bin/sh', 'depends':'', 'jobtime':'24:00:00', 'jobinput':'/dev/null', 'jobnode':'', 'jobtasks':'', 'modulenetcdf':'netcdf/4.1.2', 'networkMPI':''}
 
 
-class HuygensPlatForm(PlatForm):
+class HuygensPlatform(Platform):
     def __init__(self):
-        self.Identifier = 'huygens'    # the identifier gives the platform name
-        self.Version = '1.0'     # the platform version used
+        self.ID = 'huygens'    # the identifier gives the platform name
+        self.version = '1.0'     # the platform version used
 
 
     def give_blocking_flag(self):
@@ -117,7 +117,7 @@ class HuygensPlatForm(PlatForm):
 
 
    
-        msg1 = 'Platform initialized: %s' % self.Identifier      ; logging.info(msg1)
+        # msg1 = 'Platform initialized: %s' % self.ID ; logging.info(msg1)
 #        #msg2  = '%s version: %s'%(self.Identifier,self.Version) ; logging.info(msg2)
 
 
diff --git a/gridded/da/platform/jet.py b/gridded/da/platform/jet.py
index d6ed74775be1a2b82d4461e2e1f557c31411f13b..be876a883adc8b532fabeff53d7ccc3c353cafa5 100755
--- a/gridded/da/platform/jet.py
+++ b/gridded/da/platform/jet.py
@@ -12,17 +12,17 @@ import os
 import logging
 import subprocess
 
-from da.baseclasses.platform import PlatForm
+from da.baseclasses.platform import Platform
 
 std_joboptions = {'jobname':'test', 'jobaccount':'co2', 'jobnodes':'nserial 1', 'jobshell':'/bin/sh', 'depends':'', 'jobtime':'00:30:00', 'joblog':os.getcwd()}
 
-class JetPlatForm(PlatForm):
+class JetPlatform(Platform):
     def __init__(self):
-        self.Identifier = 'NOAA jet'    # the identifier gives the platform name
-        self.Version = '1.0'     # the platform version used
+        self.ID = 'NOAA jet'    # the identifier gives the platform name
+        self.version = '1.0'     # the platform version used
 
-        logging.debug('%s platform object initialized' % self.Identifier)
-        logging.debug('%s version: %s' % (self.Identifier, self.Version))
+        logging.debug('%s platform object initialized' % self.ID)
+        logging.debug('%s version: %s' % (self.ID, self.version))
 
 
     def get_job_template(self, joboptions={}, block=False):
diff --git a/gridded/da/platform/maunaloa.py b/gridded/da/platform/maunaloa.py
index cbba8fc7746fcd131fc797236cf9cba0190d12de..7d886743739ecc64811802244445fb420b906ab0 100755
--- a/gridded/da/platform/maunaloa.py
+++ b/gridded/da/platform/maunaloa.py
@@ -9,12 +9,12 @@ File created on 06 Sep 2010.
 
 """
 
-from da.baseclasses.platform import PlatForm, std_joboptions
+from da.baseclasses.platform import Platform, std_joboptions
 
-class MaunaloaPlatForm(PlatForm):
+class MaunaloaPlatform(Platform):
     def __init__(self):
-        self.Identifier = 'WU maunaloa'    # the identifier gives the platform name
-        self.Version = '1.0'     # the platform version used
+        self.ID = 'WU maunaloa'    # the identifier gives the platform name
+        self.version = '1.0'     # the platform version used
 
     def give_blocking_flag(self):
         return ""
diff --git a/gridded/da/tools/initexit.py b/gridded/da/tools/initexit.py
index 1406633d45cc8170551374d21b8c4eb8ba37dcc0..c37af50b23eee094b42c4cbc656ba9a5f8737fba 100755
--- a/gridded/da/tools/initexit.py
+++ b/gridded/da/tools/initexit.py
@@ -99,14 +99,14 @@ class CycleControl(dict):
         in :func:`~da.tools.initexit.CycleControl.parse_options`
 
         """
-
-        self.load_rc(args['rc'])
+        rcfile = args['rc']
+        self.load_rc(rcfile)
         self.validate_rc()
         self.opts = opts
 
         # Add some useful variables to the rc-file dictionary
 
-        self['jobrcfilename'] = self.RcFileName
+        self['jobrcfilename'] = rcfile
         self['dir.da_submit'] = os.getcwd()
         self['da.crash.recover'] = '-r' in opts
         self['verbose'] = '-v' in opts
@@ -115,19 +115,17 @@ class CycleControl(dict):
         self.OutputFileList = [] # List of files needed for output, to be extended later
 
 
-    def load_rc(self, RcFileName):
+    def load_rc(self, rcfilename):
         """ 
         This method loads a DA Cycle rc-file with settings for this simulation 
         """
 
-        rcdata = rc.read(RcFileName)
+        rcdata = rc.read(rcfilename)
         for k, v in rcdata.iteritems():
             self[k] = v
-        self.RcFileName = RcFileName
-        self.DaRcLoaded = True
-
-        logging.info('DA Cycle rc-file (%s) loaded successfully' % self.RcFileName)
 
+        logging.info('DA Cycle rc-file (%s) loaded successfully' % rcfilename)
+
 
     def validate_rc(self):
         """ 
@@ -473,18 +471,18 @@ class CycleControl(dict):
         
         # We make a copy of the current DaCycle object, and modify the start + end dates and restart value
 
-        newDaCycle = copy.deepcopy(self)
-        newDaCycle['da.restart.tstamp'] = self['time.start']
-        newDaCycle.advance_cycle_times()
-        newDaCycle['time.restart'] = True
+        new_dacycle = copy.deepcopy(self)
+        new_dacycle['da.restart.tstamp'] = self['time.start']
+        new_dacycle.advance_cycle_times()
+        new_dacycle['time.restart'] = True
         
         # Create the name of the rc-file that will hold this new input, and write it
 
         #fname = os.path.join(self['dir.exec'], 'da_runtime.rc')  # current exec dir holds next rc file
         
-        fname = os.path.join(self['dir.restart'], 'da_runtime_%s.rc' % newDaCycle['time.start'].strftime('%Y%m%d'))#advanced time
+        fname = os.path.join(self['dir.restart'], 'da_runtime_%s.rc' % new_dacycle['time.start'].strftime('%Y%m%d'))  # advanced time
         
-        rc.write(fname, newDaCycle)
+        rc.write(fname, new_dacycle)
         logging.debug('Wrote new da_runtime.rc (%s) to exec dir' % fname)
 
         # The rest is info needed for a system restart, so it modifies the current DaCycle object (self)
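
The restart handoff deep-copies the cycle settings, advances the time window, and
writes the result to an rc-file stamped with the new start date. A reduced sketch
(plain dict instead of CycleControl; advance_cycle_times is approximated by one
fixed-length step, and the key types are assumptions):

    import copy
    import datetime

    cycle = {'time.start': datetime.datetime(2005, 1, 1),
             'time.cycle': datetime.timedelta(days=7)}   # toy cycle contents

    new_dacycle = copy.deepcopy(cycle)
    new_dacycle['da.restart.tstamp'] = cycle['time.start']
    # Approximates advance_cycle_times(): shift the window one cycle forward.
    new_dacycle['time.start'] = cycle['time.start'] + cycle['time.cycle']
    new_dacycle['time.restart'] = True

    # The next cycle's rc-file carries the advanced start date in its name.
    fname = 'da_runtime_%s.rc' % new_dacycle['time.start'].strftime('%Y%m%d')
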
diff --git a/gridded/da/tools/pipeline.py b/gridded/da/tools/pipeline.py
index c386a490be66dc2b1220721a296d89f3f1e3e937..0dc321e8ac5ea1149513d7f6022353f7c9df4637 100755
--- a/gridded/da/tools/pipeline.py
+++ b/gridded/da/tools/pipeline.py
@@ -20,12 +20,12 @@ import copy
 header = '\n\n    ***************************************   '
 footer = '    *************************************** \n  '
 
-def ensemble_smoother_pipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector, ObsOperator, Optimizer):
+def ensemble_smoother_pipeline(DaCycle, Platform, DaSystem, Samples, StateVector, ObsOperator, Optimizer):
     """ The main point of entry for the pipeline """
     sys.path.append(os.getcwd())
 
     logging.info(header + "Initializing current cycle" + footer)
-    start_job(DaCycle, DaSystem, PlatForm, StateVector, Samples, ObsOperator)
+    start_job(DaCycle, DaSystem, Platform, StateVector, Samples, ObsOperator)
 
     prepare_state(DaCycle, StateVector)
     sample_state(DaCycle, Samples, StateVector, ObsOperator)
@@ -37,12 +37,12 @@ def ensemble_smoother_pipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector
     save_and_submit(DaCycle, StateVector)    
     logging.info("Cycle finished...exiting pipeline")
 
-def forward_pipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector, ObsOperator, Optimizer):
+def forward_pipeline(DaCycle, Platform, DaSystem, Samples, StateVector, ObsOperator, Optimizer):
     """ The main point of entry for the pipeline """
     sys.path.append(os.getcwd())
 
     logging.info(header + "Initializing current cycle" + footer)
-    start_job(DaCycle, DaSystem, PlatForm, StateVector, Samples, ObsOperator)                   
+    start_job(DaCycle, DaSystem, Platform, StateVector, Samples, ObsOperator)
 
     # Read from other simulation and write priors, then read posteriors and propagate
     
diff --git a/gridded/template.py b/gridded/template.py
index 2ee2ef8ee7a7aeb06cd46eb9b8904f8208f2c742..b4885d31f88d2937d9a304c1f78e5f27fefba09b 100755
--- a/gridded/template.py
+++ b/gridded/template.py
@@ -15,7 +15,7 @@ sys.path.append(os.getcwd())
 
 from da.tools.initexit import start_logger, validate_opts_args, parse_options, CycleControl 
 from da.tools.pipeline import ensemble_smoother_pipeline, header, footer
-from da.platform.huygens import HuygensPlatForm 
+from da.platform.huygens import HuygensPlatform 
 from da.carbondioxide.dasystem import CO2DaSystem 
 from da.carbondioxide.optimizer import CO2Optimizer
 from da.carbondioxide.obspack_geocarbon import ObsPackObservations
@@ -48,7 +48,7 @@ DaCycle = CycleControl(opts, args)
 ###########################################################################################
 
 
-PlatForm = HuygensPlatForm()
+Platform = HuygensPlatform()
 DaSystem = CO2DaSystem(DaCycle['da.system.rc'])
 ObsOperator = TM5ObservationOperator(DaCycle['da.obsoperator.rc'])
 Samples = ObsPackObservations()
@@ -64,7 +64,7 @@ Optimizer = CO2Optimizer()
 
 logging.info(header + "Entering Pipeline " + footer) 
 
-ensemble_smoother_pipeline(DaCycle, PlatForm, DaSystem, Samples, StateVector, ObsOperator, Optimizer)
+ensemble_smoother_pipeline(DaCycle, Platform, DaSystem, Samples, StateVector, ObsOperator, Optimizer)
 
 
 ##########################################################################################