diff --git a/da/baseclasses/statevector.py b/da/baseclasses/statevector.py
index 94cb6e97da9ced531eda5b50bcfdcb120cf8acff..7e5107b0e151a3ab460bd6c9b96b50448e666d12 100755
--- a/da/baseclasses/statevector.py
+++ b/da/baseclasses/statevector.py
@@ -34,6 +34,7 @@ your own baseclass StateVector we refer to :ref:`tut_chapter5`.
 """
 
 import os
+import sys
 import logging
 import numpy as np
 from datetime import timedelta
@@ -162,46 +163,17 @@ class StateVector(object):
             self.ensemble_members[n] = []
 
 
-        # This specifies the file to read with the gridded mask at 1x1 degrees. Each gridbox holds a number that specifies the parametermember
-        #  that maps onto it. From this map, a dictionary is created that allows a reverse look-up so that we can map parameters to a grid.
-
-        mapfile = os.path.join(dacycle.dasystem['regionsfile'])
-        ncf = io.ct_read(mapfile, 'read')
-        self.gridmap = ncf.get_variable('regions')
-        self.tcmap = ncf.get_variable('transcom_regions')
-        ncf.close()
-
-        logging.debug("A TransCom  map on 1x1 degree was read from file %s" % dacycle.dasystem['regionsfile'])
-        logging.debug("A parameter map on 1x1 degree was read from file %s" % dacycle.dasystem['regionsfile'])
-
-        # Create a dictionary for state <-> gridded map conversions
-
-        nparams = self.gridmap.max()
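+        # Fill a 1x1 degree grid (180x360) with random parameter indices in
+        # [1, nparams]; each gridbox holds the parameter member that maps onto it.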
+        self.gridmap = np.random.randint(low=1, high=self.nparams + 1, size=(180, 360))
         self.griddict = {}
-        for r in range(1, int(nparams) + 1):
-            sel = np.nonzero(self.gridmap.flat == r)
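+        # Create a dictionary for state <-> gridded map conversions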
+        for r in range(1, self.nparams + 1):
+            sel = np.nonzero(self.gridmap.flat == r)
             if len(sel[0]) > 0: 
                 self.griddict[r] = sel
-
         logging.debug("A dictionary to map grids to states and vice versa was created")
 
-        # Create a matrix for state <-> TransCom conversions
-
-        self.tcmatrix = np.zeros((self.nparams, 23), 'float') 
-
-        for r in range(1, self.nparams + 1):
-            sel = np.nonzero(self.gridmap.flat == r)
-            if len(sel[0]) < 1: 
-                continue
-            else:
-                n_tc = set(self.tcmap.flatten().take(sel[0]))
-                if len(n_tc) > 1: 
-                    logging.error("Parameter %d seems to map to multiple TransCom regions (%s), I do not know how to handle this" % (r, n_tc))
-                    raise ValueError
-                self.tcmatrix[r - 1, n_tc.pop() - 1] = 1.0
-
-        logging.debug("A matrix to map states to TransCom regions and vice versa was created")
-
         # Create a mask for species/unknowns
 
         self.make_species_mask()
@@ -230,7 +199,7 @@ class StateVector(object):
             logging.debug("   ->    %s" % k)
 
 
-    def make_new_ensemble(self, lag, covariancematrix=None):
+    def make_new_ensemble(self, lag, covariancematrix=[None]):
         """ 
         :param lag: an integer indicating the time step in the lag order
         :param covariancematrix: a matrix to draw random values from
diff --git a/da/rc/carbontracker_random.rc b/da/rc/carbontracker_random.rc
index 2b8b5c42e95e2d9da64e00fc45fbc711b2b86110..09c341716ad0da961f7a8d385c8936739eab1fbc 100644
--- a/da/rc/carbontracker_random.rc
+++ b/da/rc/carbontracker_random.rc
@@ -11,15 +11,8 @@
 ! You should have received a copy of the GNU General Public License along with this 
 ! program. If not, see <http://www.gnu.org/licenses/>. 
 
-datadir         : /Storage/CO2/carbontracker/input/ctdas_2016/
-ocn.covariance  : ${datadir}/oceans/oif/cov_ocean.2000.01.nc 
-deltaco2.prefix : oif_p3_era40.dpco2
-bio.cov.dir     : ${datadir}/covariances/gridded_NH/
-bio.cov.prefix  : cov_ecoregion
-
-regtype         : gridded_oif30
-nparameters     : 9835 
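+! Small test setup: a 100-parameter random state vector, so no regions
+! file or covariance inputs are needed.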
+nparameters     : 100
 random.seed     : 4385
-regionsfile     : ${datadir}/covariances/gridded_NH/griddedNHparameters.nc
 
 obs.sites.rc   : Random
diff --git a/da/tools/initexit.py b/da/tools/initexit.py
index 4a29e1dd50cabc1021945a85363f7e25a474fb65..3aa1b44bc54f4a33b94bee2db4aea934854a51a5 100755
--- a/da/tools/initexit.py
+++ b/da/tools/initexit.py
@@ -332,25 +332,14 @@ class CycleControl(dict):
 
             strippedname = os.path.split(self['jobrcfilename'])[-1]
             self['jobrcfilename'] = os.path.join(self['dir.exec'], strippedname)
-#            shutil.copy(os.path.join(self.dasystem['regionsfile']),os.path.join(self['dir.exec'],'da','analysis','copied_regions.nc'))
-            logging.info('Copied regions file to the analysis directory: %s'%os.path.join(self.dasystem['regionsfile'])) 
+
             if 'extendedregionsfile' in self.dasystem:
 #                shutil.copy(os.path.join(self.dasystem['extendedregionsfile']),os.path.join(self['dir.exec'],'da','analysis','copied_regions_extended.nc')) 
                 logging.info('Copied extended regions file to the analysis directory: %s'%os.path.join(self.dasystem['extendedregionsfile'])) 
-            else: 
-#                shutil.copy(os.path.join(self['dir.exec'],'da','analysis','olson_extended.nc'),os.path.join(self['dir.exec'],'da','analysis','copied_regions_extended.nc')) 
-                logging.info('Copied extended regions within the analysis directory: %s'%os.path.join(self['dir.exec'],'da','analysis','olson_extended.nc')) 
-            for filename in glob.glob(os.path.join(self['dir.exec'],'da','analysis','*.pickle')):
-                logging.info('Deleting pickle file %s to make sure the correct regions are used'%os.path.split(filename)[1])
-                os.remove(filename) 
-            for filename in glob.glob(os.path.join(self['dir.exec'],'*.pickle')):
-                logging.info('Deleting pickle file %s to make sure the correct regions are used'%os.path.split(filename)[1])
-                os.remove(filename) 
             if 'random.seed.init' in self.dasystem:
                 self.read_random_seed(True)
 
         self.parse_times()
-        #self.write_rc(self['jobrcfilename'])
 
     def setup_file_structure(self):
         """ 
diff --git a/templates/template.py b/templates/template.py
index 57ca4fbdfa9bb1be5faf17c754f3b1325ac0fbd5..a305d6ffea7ae37355d4534786582bf449786725 100755
--- a/templates/template.py
+++ b/templates/template.py
@@ -29,7 +29,8 @@ from da.tools.initexit import start_logger, validate_opts_args, parse_options, C
 from da.tools.pipeline import ensemble_smoother_pipeline, header, footer
 from da.platform.capegrim import CapeGrimPlatform
 from da.baseclasses.dasystem import DaSystem
-from da.co2gridded.statevector import CO2GriddedStateVector 
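+# Use the generic base class StateVector, which builds a random parameter
+# map instead of reading a gridded regions file.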
+from da.baseclasses.statevector import StateVector
 from da.baseclasses.obs import Observations
 from da.baseclasses.optimizer import Optimizer
 from da.baseclasses.observationoperator import RandomizerObservationOperator
@@ -54,7 +54,7 @@ platform = CapeGrimPlatform()
 dasystem = DaSystem(dacycle['da.system.rc'])
 obsoperator = RandomizerObservationOperator(dacycle['da.obsoperator.rc'])
 samples     = Observations()
-statevector = CO2GriddedStateVector()
+statevector = StateVector()
 optimizer = Optimizer()
 
 ##########################################################################################