diff --git a/Convert-EUProcessingFactorsDB/Convert-EUProcessingFactorsDB.py b/Convert-EUProcessingFactorsDB/Convert-EUProcessingFactorsDB.py
index 7a45068127d677d89710c1e18b7dd69e7888efcd..87020cb959c7d1a67643ab2e0c788f7b69cf7947 100644
--- a/Convert-EUProcessingFactorsDB/Convert-EUProcessingFactorsDB.py
+++ b/Convert-EUProcessingFactorsDB/Convert-EUProcessingFactorsDB.py
@@ -40,7 +40,7 @@ dataset.add(
     name='processing_type',
     short_argument='-t',
     help='The (input) processing type file - '
-        + 'format: csv (Comma Seperated).',
+         + 'format: csv (Comma Separated).',
     default_name='ProcessingTypes.csv',
     default_dir='Input',
     direction='Input')
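
Note: the hunk above only re-indents the continuation line. For readers unfamiliar with mcra.DataSet, the registration is roughly equivalent to a plain argparse declaration. A minimal sketch, assuming the wrapper maps short_argument/default_name/default_dir onto an option with a default path (the long option name here is a guess, not the wrapper's actual API):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '-t', '--processing_type_file',
    default='Input/ProcessingTypes.csv',
    help='The (input) processing type file - format: csv (Comma Separated).')
args = parser.parse_args([])
print(args.processing_type_file)  # Input/ProcessingTypes.csv
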
@@ -124,11 +124,11 @@ dataset.verbose(1, 'Input file : {file}; {version}; {props}'.format(
 # Use this file only if called explicitly from the command line
 # and of course, it has to exist. The -g is enough to trigger the default
 if dataset.args.food_composition_file is not None \
-    and dataset.food_composition.file.exist:
+   and dataset.food_composition.file.exist:
     dataset.food_composition.load(sheet_name='FoodTranslation')
     dataset.verbose(1, 'Input file : {file}; {props}'.format(
-    file=dataset.food_composition.file.path,
-    props=dataset.food_composition.properties))
+        file=dataset.food_composition.file.path,
+        props=dataset.food_composition.properties))
 
 
 #############################################################################
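
Note: the re-indented block above implements a guard: the optional food-composition workbook is read only when it was named on the command line and the file actually exists. A standalone sketch of the same pattern, assuming a plain pandas read (the path is hypothetical; only the sheet name 'FoodTranslation' comes from the code above):

import os
import pandas as pd

food_composition_file = 'Input/FoodComposition.xlsx'  # hypothetical path
sheet = None
if food_composition_file is not None and os.path.isfile(food_composition_file):
    # Optional inputs are skipped unless explicitly requested and present.
    sheet = pd.read_excel(food_composition_file, sheet_name='FoodTranslation')
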
@@ -232,11 +232,11 @@ if dataset.food_composition.sheet is not None:
     fcs = dataset.food_composition.sheet[
         dataset.food_composition.sheet['idToFood'].str.startswith('P')]
     # Now split the first column
-    fs=pd.DataFrame()
+    fs = pd.DataFrame()
     # Bit of a mess to combine again.
     fs[['idFromFood-Left', 'idFromFood-Right']] = \
         fcs['idFromFood'].str.rsplit('-', n=1, expand=True)
-    fcs=fcs.merge(fs, left_index=True, right_index=True)
+    fcs = fcs.merge(fs, left_index=True, right_index=True)
     # New columns are properly joined now
     fcs['idToFood-PC'] = fcs.loc[:, ('idToFood', 'idFromFood-Right')].apply(
         lambda x: '-'.join(x.dropna()), axis=1)
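
Note: the hunk above is whitespace-only, but the surrounding idiom is worth spelling out: rsplit('-', n=1, expand=True) splits each id on its last hyphen, and '-'.join(x.dropna()) re-attaches the processing code only where one exists. A self-contained sketch on made-up ids (assigning the split columns directly instead of via the helper frame and merge, and selecting with a list rather than a tuple for newer pandas):

import pandas as pd

# Toy rows standing in for the food-composition sheet; ids are made up.
fcs = pd.DataFrame({'idFromFood': ['P0110-003', 'P0112'],
                    'idToFood': ['P0110000A', 'P0112000A']})
# n=1 splits on the last '-' only; rows without one get None on the right.
fcs[['idFromFood-Left', 'idFromFood-Right']] = \
    fcs['idFromFood'].str.rsplit('-', n=1, expand=True)
# dropna() drops the missing part, so the second row keeps a plain id.
fcs['idToFood-PC'] = fcs[['idToFood', 'idFromFood-Right']].apply(
    lambda x: '-'.join(x.dropna()), axis=1)
print(fcs['idToFood-PC'].tolist())  # ['P0110000A-003', 'P0112000A']
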
@@ -249,7 +249,7 @@ if dataset.food_composition.sheet is not None:
     efsa_combined.loc[
         (efsa_combined['idToFood-PC'].notna()),
         'idFoodProcessed'] = efsa_combined['idFromFood']
-    print(fcs)
+
 #############################################################################
 # Phase 3. Exporting the data.
 # Seems obvious what to do here.
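
Note: dropping the leftover print(fcs) is right; the masked assignment kept just above it is the interesting part. A minimal sketch of that pattern on made-up data: .loc with a boolean mask fills idFoodProcessed only for rows that actually carry a processing code, aligning the right-hand side by index.

import pandas as pd

efsa_combined = pd.DataFrame({
    'idToFood-PC': ['P0110000A-003', None],
    'idFromFood': ['P0110-003', 'P0112']})
efsa_combined['idFoodProcessed'] = None
# Only rows with a processing code get idFoodProcessed filled in.
mask = efsa_combined['idToFood-PC'].notna()
efsa_combined.loc[mask, 'idFoodProcessed'] = efsa_combined['idFromFood']
print(efsa_combined['idFoodProcessed'].tolist())  # ['P0110-003', None]
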
diff --git a/Convert-EUProcessingFactorsDB/mcra.py b/Convert-EUProcessingFactorsDB/mcra.py
index f6740297186c84bba9a1da1f4a76e795e4480eee..596a0fc0b820609537edf90158c171106dfd10c5 100644
--- a/Convert-EUProcessingFactorsDB/mcra.py
+++ b/Convert-EUProcessingFactorsDB/mcra.py
@@ -186,7 +186,7 @@ class DataSheet:
             '  * Modified: {mod}'.format(mod=self.file.modified))
         self.add_reportline(
             '  * File size: {size_str} ({size} B)'.format(
-                size_str=self.file.size_string,size=self.file.size))
+                size_str=self.file.size_string, size=self.file.size))
         self.add_reportline(
             '  * Hash: {hash}'.format(hash=self.file.hash))
         self.report = self.report+temp
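
Note: the report block above prints the file's modification time, size, and hash. How DataSheet computes self.file.size and self.file.hash is not shown in this diff; a rough stand-in, assuming os.path.getsize and a SHA-256 digest (the algorithm and path are assumptions):

import hashlib
import os

path = 'Input/ProcessingTypes.csv'  # hypothetical input
if os.path.isfile(path):
    size = os.path.getsize(path)
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    print('  * File size: {size} B'.format(size=size))
    print('  * Hash: {hash}'.format(hash=digest))
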
@@ -320,12 +320,11 @@ class DataSheet:
         if self.file.extension == '.zip':
             if to is None:
                 shutil.copyfile(path,
-                    os.path.join(
-                        self.file.zippath,
-                        os.path.split(path)[1]))
+                                os.path.join(self.file.zippath,
+                                             os.path.split(path)[1]))
             else:
                 shutil.copyfile(path,
-                    os.path.join(self.file.zippath, to))
+                                os.path.join(self.file.zippath, to))
 
 
 class DataSet:
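
Note: the re-wrapped copyfile calls above stage an extra file into the directory that later becomes the zip archive, either under its own basename (to is None) or under a new name. A runnable sketch of the same branch, with hypothetical paths:

import os
import shutil

zippath = '/tmp/zip-staging'  # hypothetical staging directory
os.makedirs(zippath, exist_ok=True)
src = '/tmp/example-source.txt'
with open(src, 'w') as f:
    f.write('example')
to = None  # None keeps the original basename; a string renames the copy
if to is None:
    shutil.copyfile(src, os.path.join(zippath, os.path.split(src)[1]))
else:
    shutil.copyfile(src, os.path.join(zippath, to))
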
@@ -456,17 +455,19 @@ class DataSet:
                 #
                 if dataset.autoload:
                     if getattr(self.args, datasetname+'_file') is None \
-                        and not dataset.file.necessary:
+                       and not dataset.file.necessary:
                         # Don't load files which are not necessary and not
                         # explicitly called from the command line.
-                        self.verbose(3, 'Not loading {file}.'.format(file=
-                            dataset.file.path))
+                        self.verbose(3, 'Not loading {file}.'.format(
+                            file=dataset.file.path))
                     else:
                         dataset.load()
                         if dataset.file.exist:
-                            self.verbose(1, 'Input file : {file}; {props}'.format(
-                                file=dataset.file.path,
-                                props=dataset.properties))
+                            self.verbose(
+                                1,
+                                'Input file : {file}; {props}'.format(
+                                    file=dataset.file.path,
+                                    props=dataset.properties))
                             # High verbosity, dump data.
                             self.verbose(3, dataset.sheet)
             else:
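
Note: the reflowed autoload logic above boils down to: load each registered input unless it was neither named on the command line (getattr(self.args, datasetname + '_file') is None) nor marked necessary. A reduced sketch of that guard, with the dataset name and necessary flag invented for illustration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--food_composition_file', default=None)
args = parser.parse_args([])

datasetname = 'food_composition'  # hypothetical registered dataset
necessary = False                 # stands in for dataset.file.necessary
if getattr(args, datasetname + '_file') is None and not necessary:
    print('Not loading {file}.'.format(file='FoodComposition.xlsx'))
else:
    print('Loading ...')
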