diff --git a/libs/baseclass/instance_card.py b/libs/baseclass/instance_card.py
index 3b8410a77ae3584cadddf6a85bffed0e327a259e..20438452de5bd43ea5dd3bbfa898109247b7d10d 100644
--- a/libs/baseclass/instance_card.py
+++ b/libs/baseclass/instance_card.py
@@ -1,10 +1,15 @@
 import os.path
 
 from kivy.app import App
+from kivy.uix.image import Image
 from kivy.properties import StringProperty, ObjectProperty
 
 from kivymd.uix.behaviors import RoundedRectangularElevationBehavior
+from kivymd.uix.boxlayout import MDBoxLayout
+from kivymd.uix.button import MDFlatButton
 from kivymd.uix.card import MDCard
+from kivymd.uix.dialog import MDDialog
+from kivymd.uix.label import MDLabel
 
 
 class InstanceCard(MDCard, RoundedRectangularElevationBehavior):
@@ -17,9 +22,85 @@ class InstanceCard(MDCard, RoundedRectangularElevationBehavior):
 
         self.ids.image.source = os.path.join(self.app.trial_path, self.instance['properties']['trial'].lower(),
                                              self.instance['properties']['batch'],
-                                             'instances', str(self.instance['id'])) + '.png'
+                                             'instances', str(self.instance['id'])) + '.cropped.png'
+
+        self.peduncle_path = os.path.join(self.app.trial_path, self.instance['properties']['trial'].lower(),
+                                          self.instance['properties']['batch'],
+                                          'instances', str(self.instance['id'])) + '.peduncle.png'
+
+        traits = self.instance['traits']
 
         if self.instance['properties']['orientation'] == 'up':
-            self.ids.peduncle.text = 'Peduncle Scar Size: ' + str(self.instance['traits']['peduncle']['mm2']) + ' mm2'
-        else:
-            self.ids.peduncle.text = 'Trait 1: Value'
\ No newline at end of file
+            self.ids.traits.add_widget(
+                MDLabel(
+                    text='Peduncle Scar Size: {} mm2'.format(traits['peduncle']['mm2']),
+                    adaptive_height=True
+                )
+            )
+
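+            # widen the image row by one 192 px slot to fit the extra peduncle overlay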
+            self.ids.images.width = self.ids.images.width + 192
+            self.ids.images.add_widget(
+                Image(
+                    height=168,
+                    allow_stretch=True,
+                    pos_hint={'top': 0.9375},
+                    source=self.peduncle_path
+                )
+            )
+
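+        # every instance also gets an ellipse-fit overlay image, regardless of orientation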
+        self.ids.images.width = self.ids.images.width + 192
+        self.ids.images.add_widget(
+            Image(
+                height=168,
+                allow_stretch=True,
+                pos_hint={'top': 0.9375},
+                source=os.path.join(self.app.trial_path, self.instance['properties']['trial'].lower(),
+                                    self.instance['properties']['batch'],
+                                    'instances', str(self.instance['id'])) + '.ellipse.png'
+            )
+        )
+
+        self.ids.traits.add_widget(
+            MDLabel(
+                text='Hue: {}'.format(round(traits['color']['hue'], 2)),
+                adaptive_height=True
+            )
+        )
+
+        if self.instance['properties']['orientation'] == 'up' or self.instance['properties']['orientation'] == 'down':
+            self.ids.traits.add_widget(
+                MDLabel(
+                    text='Short Axis: {} mm'.format(round(traits['dimensions']['shortAxis'], 2)),
+                    adaptive_height=True
+                )
+            )
+            self.ids.traits.add_widget(
+                MDLabel(
+                    text='Long Axis: {} mm'.format(round(traits['dimensions']['longAxis'], 2)),
+                    adaptive_height=True
+                )
+            )
+
+        if self.instance['properties']['orientation'] == 'side':
+            self.ids.traits.add_widget(
+                MDLabel(
+                    text='Height: {} mm'.format(round(traits['dimensions']['height'], 2)),
+                    adaptive_height=True
+                )
+            )
+
+        self.ids.traits.add_widget(
+            MDLabel(
+                text='Batch Volume: {} mm3'.format(round(traits['dimensions']['batchVolume'], 2)),
+                adaptive_height=True
+            )
+        )
+
+        self.ids.traits.add_widget(
+            MDLabel(
+                text='Batch Shape Ratio: {}'.format(round(traits['dimensions']['shapeRatio'], 2)),
+                adaptive_height=True
+            )
+        )
\ No newline at end of file
diff --git a/libs/baseclass/process.py b/libs/baseclass/process.py
index 2c0255621dfd021b5214a1dd02140f8ce7b23010..81965468af83e72b129bb4949ea42db057c4b006 100644
--- a/libs/baseclass/process.py
+++ b/libs/baseclass/process.py
@@ -20,8 +20,9 @@ from sklearn.linear_model import LinearRegression
 from sklearn.neighbors import KNeighborsClassifier
 from sklearn.model_selection import train_test_split
 
-from libs.vision.peduncle import ANNMaskerInfer
-from libs.vision.segmenter import Segmentron
+# from libs.vision.segmenter import Segmentron
+from libs.vision.peduncle import PeduncleSegmentron
+from libs.vision.tray import FruitSegmentron
 
 
 class Process(Screen):
@@ -36,7 +37,7 @@ class Process(Screen):
         # self.ids.count.text = ''
 
         self.ids.status.clear_widgets()
-        for label_id in ['save', 'segment', 'count', 'distance', 'peduncle', 'shape']:
+        for label_id in ['save', 'segment', 'distance', 'peduncle', 'hue', 'shape']:
             label = MDLabel(size_hint=(1, None), height='12dp')
 
             self.ids.status.add_widget(label)
@@ -77,55 +78,52 @@ class Segmenter:
     def __init__(self, **kwargs):
         super(Segmenter, self).__init__(**kwargs)
 
-    def process(self):
-        segmenter = self.app.segmenter
+    def process(self, trial=None, batch=None):
         screen = self.app.get_screen('Process')
         tomato_list = []
 
-        classes = ['up', 'down', 'side']
-        trial = self.app.trial
-        batch = self.app.batch
+        if trial is None:
+            trial = self.app.trial
+
+        if batch is None:
+            batch = self.app.batch
 
         batch_path = os.path.join('assets', 'trials', trial['name'].lower(), batch)
+        # batch_path = os.path.join('..', '..', 'assets', 'trials', trial, batch)
+
         raw_path = os.path.join(batch_path, 'raw')
+        instance_path = os.path.join(batch_path, 'instances')
         os.makedirs(raw_path, exist_ok=True)
-
-        if self.app.batch == '0000':
-            source_path = os.path.join('assets', 'demo')
-
-            for file_name in os.listdir(source_path):
-                # construct full file path
-                source = os.path.join(source_path, file_name)
-                destination = os.path.join(raw_path, file_name)
-
-                shutil.copy(source, destination)
+        os.makedirs(instance_path, exist_ok=True)
 
         screen.ids.segment.text = 'Extracting tomatoes...'
 
-        test_path = os.path.join('libs', 'vision', 'models', 'test', )
-        outputs, bgr_output = segmenter.process_single(os.path.join(raw_path, 'rgb_aligned.png'))
-        # outputs, bgr_output = segmenter.process_single(os.path.join(test_path, 'L515_test1511_tailored_rgb_aligned.png'))
+        segmenter = FruitSegmentron()
+        peduncler = PeduncleSegmentron()
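+        # two-stage pipeline: FruitSegmentron segments whole tomatoes in the tray image,
+        # then PeduncleSegmentron scores the scar on each up-facing crop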
 
-        screen.ids.count.text = 'Found {} tomato\'s'.format(len(outputs['instances']))
-        screen.ids.distance.text = 'Determining tomato distance from camera...'
+        segmentation = segmenter.process_single(os.path.join(raw_path, 'rgb_aligned.png'))
+        # segmentation = segmenter.process_single(test_path)
 
         img = cv2.imread(os.path.join(raw_path, 'rgb_aligned.png'))
-        # img = cv2.imread(os.path.join(test_path, 'L515_test1511_tailored_rgb_aligned.png'))
-        # img = cv2.resize(img, (1280, 720), interpolation=cv2.INTER_AREA)
-        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
         points = o3d.io.read_point_cloud(os.path.join(raw_path, 'points.ply'))
         Coordinates = np.asarray(points.points).reshape(img.shape)
 
-        instance_path = os.path.join(batch_path, 'instances')
-        os.makedirs(instance_path, exist_ok=True)
+        for idx, key in enumerate(segmentation['Keys']):
+            mask = segmentation['Masks'][idx]
+            orientation = key.split('_')[0]
+
+            cv2.imwrite(os.path.join(instance_path, str(idx + 1) + '.png'),
+                        cv2.cvtColor(segmentation['Images'][idx], cv2.COLOR_BGR2RGB))
 
-        for idx, mask in enumerate(outputs['instances'].pred_masks.numpy()):
-            orientation = outputs['instances'].pred_classes[idx].numpy()
             tomato = {
                 'id': idx + 1,
                 'properties': {
-                    'orientation': classes[orientation],
+                    'orientation': orientation,
                     'trial': trial['name'],
+                    # 'trial': trial,
                     'batch': batch,
                 },
                 'traits': {}
@@ -141,8 +137,14 @@ class Segmenter:
             masked = cv2.bitwise_and(img, img, mask=img_mask)
 
             cropped = masked[topy:bottomy + 1, topx:bottomx + 1]
+            cv2.imwrite(
+                os.path.join(instance_path, str(idx + 1) + '.cropped.png'),
+                cropped)
+
             croppedCoords = CoordMasked[topy:bottomy + 1, topx:bottomx + 1]
 
+            screen.ids.distance.text = 'Calculating distance from camera...'
+
             mmperpix_x = np.zeros(10)
             mmperpix_y = np.zeros(10)
 
@@ -164,113 +168,206 @@
             mmperpix = np.round(np.mean([mmperpix_x, mmperpix_y]), 3)
             tomato['properties']['mmmPerPixel'] = mmperpix
 
-            tomato_list.append(tomato)
+            peduncle_mask = None
+            if orientation == 'up':
+                screen.ids.peduncle.text = 'Scoring peduncle scar size...'
 
-            cv2.imwrite(
-                os.path.join(instance_path, str(idx + 1) + '.png'),
-                cropped)
+                ped = peduncler.process_single(cv2.cvtColor(segmentation['Images'][idx], cv2.COLOR_RGB2BGR))
+                peduncle_mask = ped
 
-        screen.ids.peduncle.text = 'Calculating peduncle scar size...'
+                pixels = np.count_nonzero(ped)
+                tomato['traits']['peduncle'] = {
+                    'pixels': int(pixels),
+                    'mm2': round(int(pixels) * (mmperpix * mmperpix), 2),
+                }
 
-        for tomato in [tomato for tomato in tomato_list if tomato['properties']['orientation'] == 'up']:
-            image_path = os.path.join(instance_path, str(tomato['id']) + '.png')
+                im_copy = np.float64(segmentation['Images'][idx].copy())
+                im_copy[:, :, 0][ped] *= 0.6
+                im_copy[:, :, 1][ped] *= 0.6
+                im_copy[:, :, 2][ped] *= 2
 
-            ANNMasker = ANNMaskerInfer()
-            imRGB = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
-            ANNMasker.preprocess(imRGB)
-            ANNMasker.infer()
-            ANNMasker.postprocess()
+                im_copy[im_copy > 255] = 255
+                im_copy[im_copy < 0] = 0
 
-            imRGB_masked = np.float64(imRGB.copy())
-            imRGB_masked[:, :, 1][ANNMasker.mask == 1] *= 0.6
+                # plt.imshow(np.uint8(im_copy))
+                # plt.show()
+                # plt.imshow(segmentation['Images'][idx])
+                # plt.show()
 
-            im = Image.fromarray(np.uint8(imRGB_masked))
-            peduncle_path = os.path.join(instance_path, str(tomato['id']) + '.peduncle.png')
-            im.save(peduncle_path, format='png')
+                converted = cv2.cvtColor(np.uint8(im_copy), cv2.COLOR_BGR2RGB)
 
-            pixels = ANNMasker.mask.sum()
-            pixel_conversion = tomato['properties']['mmmPerPixel']
-            tomato['traits']['peduncle'] = {
-                'pixels': int(pixels),
-                'mm2': round(int(pixels) * (pixel_conversion * pixel_conversion), 2)
-            }
+                cv2.imwrite(os.path.join(instance_path, str(idx + 1) + '.peduncle.png'), converted)
+
+            screen.ids.hue.text = 'Calculating hue...'
+
+            if orientation == 'up':
+                Kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
+                HSV = cv2.cvtColor(segmentation['Images'][idx], cv2.COLOR_BGR2HSV)
+                mask = peduncle_mask
+
+                mask = (mask == 0) * (HSV[:, :, 2] != 0)
+                mask = cv2.erode(np.uint8(mask), kernel=Kernel)
+
+                HueFlesh = cv2.bitwise_and(HSV, HSV, mask=mask)[:, :, 0]
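+                # rescale OpenCV hue (0-179) to the 0-255 range, then offset by 64;
+                # presumably this shifts the red wrap-around away from the range edges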
+                HueFlesh = np.uint8(np.float64(HueFlesh) / (179 / 255)) + 64
+
+                tomato['traits']['color'] = {
+                    'hue': float(np.mean(HueFlesh[mask != 0]))
+                }
+
+            else:
+                HSV = cv2.cvtColor(segmentation['Images'][idx], cv2.COLOR_BGR2HSV)
+                HueFlesh = HSV[:, :, 0]
+                HueFlesh = np.uint8(np.float64(HueFlesh) / (179 / 255)) + 64
+
+                tomato['traits']['color'] = {
+                    'hue': float(np.mean(HueFlesh[HSV[:, :, 2] != 0]))
+                }
+
+            screen.ids.shape.text = 'Determining shape and volume...'
+            Kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+
+            im = segmentation['Images'][idx].copy()
 
-            imRGB_masked = np.float64(imRGB.copy())
-            imRGB_masked[:, :, 1][ANNMasker.mask == 1] *= 0.6
-
-            fig, axs = plt.subplots(ncols=3, figsize=(3, 3), dpi=300)
-            axs[0].imshow(ANNMasker.mask, cmap='gray')
-            axs[0].title.set_text('mask')
-            axs[0].title.set_fontsize(5)
-            axs[0].axis('off')
-
-            axs[1].imshow(ANNMasker.imRGB)
-            axs[1].title.set_text('RGB')
-            axs[1].title.set_fontsize(5)
-            axs[1].axis('off')
-
-            axs[2].imshow(np.uint8(imRGB_masked))
-            axs[2].title.set_text('RGB masked')
-            axs[2].title.set_fontsize(5)
-            axs[2].axis('off')
-
-            plt.show()
-
-        # classifier = pickle.load(open('libs/vision/models/peduncle.pkl', 'rb'))
-        # for tomato in [tomato for tomato in tomato_list if tomato['properties']['orientation'] == 'up']:
-        #     image_path = os.path.join(instance_path, str(tomato['id']) + '.png')
-        #
-        #     data = np.float64(cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB))
-        #     data /= np.mean(data[data != 0]) / 80
-        #     data[data > 255] = 255
-        #     imRGB = data.copy()
-        #     data = cv2.cvtColor(np.uint8(data), cv2.COLOR_RGB2HSV)
-        #
-        #     data_reshaped = data.reshape([np.prod(data.shape[:2]), data.shape[2]])
-        #     mask_reshaped_proba = classifier.predict_proba(data_reshaped)[:, 1]
-        #     mask_proba = np.float64(mask_reshaped_proba.reshape(data.shape[:2]))
-        #     mask = mask_proba > 0.5
-        #     # mask = self.remove_small_clusters(mask)
-        #
-        #     clusters = cv2.connectedComponentsWithStats(np.uint8(mask), 8, cv2.CV_32S)
-        #     clusters = list(clusters)
-        #
-        #     if clusters[0] > 2:
-        #         scores = np.zeros(clusters[0] - 1)
-        #         for i in range(1, clusters[0]):
-        #             cluster_mask_proba = ((clusters[1] == i) * mask_proba)
-        #             scores[i - 1] = np.mean(cluster_mask_proba[cluster_mask_proba != 0])
-        #
-        #         if np.sum(scores == np.max(scores)) != 1:  # Check if multiple clusters have same score
-        #             VectorLengths = np.zeros(np.sum(scores == np.max(scores)))
-        #             for i in range(np.sum(scores == np.max(scores))):
-        #                 Vector = clusters[3][i + 1, :] - np.array(mask.shape, dtype='float64') / 2
-        #                 VectorLengths[i] = np.sqrt(np.sum(Vector ** 2))
-        #             mask = clusters[1] == (np.argmin(VectorLengths) + 1)
-        #         else:
-        #             mask = clusters[1] == (np.argmax(scores) + 1)
-        #
-        #     imRGB[:, :, 2] += 0.8 * 255 * mask
-        #     imRGB[:, :, 2][imRGB[:, :, 2] > 255] = 255
-        #     imRGB[:, :, 1] -= 0.4 * 255 * mask
-        #     imRGB[:, :, 1][imRGB[:, :, 1] < 0] = 0
-        #
-        #     im = Image.fromarray(np.uint8(imRGB))
-        #     peduncle_path = os.path.join(instance_path, str(tomato['id']) + '.peduncle.png')
-        #
-        #     im.save(peduncle_path, format='png')
-        #     pixels = mask.sum()
-        #     pixel_conversion = tomato['properties']['mmmPerPixel']
-        #     tomato['traits']['peduncle'] = {
-        #         'pixels': int(pixels),
-        #         'mm2': round(int(pixels) * (pixel_conversion * pixel_conversion), 2)
-        #     }
-
-        screen.ids.shape.text = 'Determining shape...'
+            m = np.uint8(np.sum(segmentation['Images'][idx].copy(), axis=2) != 0)
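+            # keep only the largest connected component of the instance mask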
+            Clusters = cv2.connectedComponentsWithStats(m, 8, cv2.CV_32S)
+            mask = np.uint8(Clusters[1] == (np.argmax(Clusters[2][1:, 4]) + 1))
+
+            # mask = np.uint8(np.sum(m_new, axis=2) != 0)
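+            # morphological opening then closing to smooth the mask boundary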
+            mask = cv2.erode(mask, kernel=Kernel1)
+            mask = cv2.dilate(mask, kernel=Kernel1)
+            mask = cv2.dilate(mask, kernel=Kernel1)
+            mask = cv2.erode(mask, kernel=Kernel1)
+
+            cnt = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
+            ellipse = cv2.fitEllipse(cnt[0][0])
+
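+            # the two equivalent angles of the fitted ellipse's long axis (anglesLA) and
+            # short axis (anglesSA); ellipse[2] is OpenCV's rotation angle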
+            anglesLA = [ellipse[2], ellipse[2] - 180]
+            anglesSA = [(ellipse[2] + 90) % 180 - (ellipse[2] >= 90) * 180,
+                        ellipse[2] - 90]
+
+            if orientation == 'side':  # Find where the peduncle is
+                cntCoord = np.zeros([cnt[0][0].shape[0], 3])
+                cntCoord[:, :2] = cnt[0][0][:, 0, :]
+
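+                # signed angle of each contour point about the ellipse centre, measured from the image y-axis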
+                for i in range(cnt[0][0].shape[0]):
+                    Vec = cntCoord[i, :2] - ellipse[0]
+                    unitVec = Vec / np.linalg.norm(Vec)
+                    dotProd = np.dot(unitVec, [0, 1])
+                    cntCoord[i, 2] = -np.sign(unitVec[0]) * (np.arccos(dotProd))
+
+                ellipseCnt = np.where(cv2.ellipse(np.zeros(im.shape),
+                                                  tuple(np.uint16(ellipse[0])),
+                                                  tuple(np.uint16(np.array(ellipse[1]) / 2)),
+                                                  ellipse[2], 0, 360, (0, 0, 255), 1)[:, :, 2] != 0)
+
+                ellipseCoord = np.zeros([ellipseCnt[0].shape[0], 3])
+                ellipseCoord[:, 0] = ellipseCnt[1]
+                ellipseCoord[:, 1] = ellipseCnt[0]
+
+                for i in range(ellipseCnt[0].shape[0]):
+                    Vec = ellipseCoord[i, :2] - ellipse[0]
+                    unitVec = Vec / np.linalg.norm(Vec)
+                    dotProd = np.dot(unitVec, [0, 1])
+                    ellipseCoord[i, 2] = -np.sign(unitVec[0]) * (np.arccos(dotProd))
+
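+                # radial error between the actual contour and the fitted ellipse per angle;
+                # a strong negative dip marks the peduncle indentation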
+                Angles = np.linspace(-np.pi, np.pi, 361)
+                ellipseErrors = np.zeros(len(Angles))
+
+                for i, angle in enumerate(Angles):
+                    cntVec = cntCoord[np.argmin(abs(cntCoord[:, 2] - angle)), :2] - ellipse[0]
+                    ellipseVec = ellipseCoord[np.argmin(abs(ellipseCoord[:, 2] - angle)), :2] - ellipse[0]
+                    ellipseErrors[i] = np.sqrt(np.sum(cntVec ** 2)) - np.sqrt(np.sum(ellipseVec ** 2))
+
+                critAngle = Angles[np.argmin(ellipseErrors)]
+                critAngles = [(360 / (2 * np.pi)) * critAngle,
+                              ((360 / (2 * np.pi)) * critAngle - 180)]
+
+                if critAngles[1] < -180:
+                    critAngles[1] = critAngles[1] % 180
+
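+                # only trust the dip if it cuts deeper than ~1/18 of the ellipse's minor axis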
+                if np.min(ellipseErrors) < -min(ellipse[1]) / 18:
+                    angleDiffs = np.zeros([2, 2, 2])
+
+                    for h, angles in enumerate([anglesSA, anglesLA]):
+                        for i in range(2):
+                            for j in range(2):
+                                angleDiffs[h, i, j] = abs(angles[i] - critAngles[j])
+
+                    if list(set(np.where(angleDiffs == np.min(angleDiffs))[0]))[0] == 0:
+                        tomato['traits']['dimensions'] = {
+                            'height': ellipse[1][0] * mmperpix
+                        }
+                    elif list(set(np.where(angleDiffs == np.min(angleDiffs))[0]))[0] == 1:
+                        tomato['traits']['dimensions'] = {
+                            'height': ellipse[1][1] * mmperpix
+                        }
+                else:
+                    tomato['traits']['dimensions'] = {
+                        'height': ellipse[1][0] * mmperpix
+                    }
+
+            image = cv2.ellipse(im,
+                                tuple(np.uint16(ellipse[0])),
+                                tuple(np.uint16(np.array(ellipse[1]) / 2)),
+                                ellipse[2], 0, 360, (0, 255, 0), 1)
+            # image = cv2.drawContours(im, cnt[0], 0, (0, 255, 0), 1)
+
+            if (orientation == 'up') | (orientation == 'down'):
+                if 'dimensions' not in tomato['traits']:
+                    tomato['traits']['dimensions'] = {}
+
+                short_axis = ellipse[1][0] * mmperpix
+                long_axis = ellipse[1][1] * mmperpix
+
+                tomato['traits']['dimensions']['shortAxis'] = short_axis
+                tomato['traits']['dimensions']['longAxis'] = long_axis
+
+            converted = cv2.cvtColor(np.uint8(image), cv2.COLOR_BGR2RGB)
+            cv2.imwrite(os.path.join(instance_path, str(idx + 1) + '.ellipse.png'), converted)
+
+            tomato_list.append(tomato)
+
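+        # average each axis over the tomatoes that provide it; this assumes every
+        # orientation occurs at least once in the batch, otherwise a count stays zero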
+        short, long, height = 0, 0, 0
+        short_count, long_count, height_count = 0, 0, 0
+
+        for tomato in tomato_list:
+            dimensions = tomato['traits']['dimensions']
+
+            if 'height' in dimensions:
+                height = height + dimensions['height']
+                height_count = height_count + 1
+
+            if 'shortAxis' in dimensions:
+                short = short + dimensions['shortAxis']
+                short_count = short_count + 1
+
+            if 'longAxis' in dimensions:
+                long = long + dimensions['longAxis']
+                long_count = long_count + 1
+
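+        # ellipsoid volume from the mean diameters: V = (pi/6) * d_short * d_long * d_height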
+        batch_volume = (1/6) * np.pi * (short / short_count) * (long / long_count) * (height / height_count)
+        shape_ratio = np.mean([(short / short_count), (long / long_count)]) / (height / height_count)
+
+        for tomato in tomato_list:
+            tomato['traits']['dimensions']['batchVolume'] = batch_volume
+            tomato['traits']['dimensions']['shapeRatio'] = shape_ratio
 
         with open(os.path.join(batch_path, '.instances'), 'w', encoding='utf-8') as outfile:
             json.dump(tomato_list, outfile)
-            # instance_path, str(idx + 1) + '.png'
 
         self.app.transition('Trial')
 
@@ -290,3 +387,11 @@
             new_mask += clusters[1] == idx
 
         return new_mask == 1
+
+
+if __name__ == '__main__':
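+    # standalone smoke test; requires toggling the commented-out string-based trial paths above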
+    file = os.path.join(os.path.dirname(__file__), 'test', 'D415_test1_1_single_rgb_raw.png')
+
+    sgm = Segmenter()
+    sgm.process('test', '101')
diff --git a/libs/baseclass/save.py b/libs/baseclass/save.py
index d7c9eecae2e23f136f70e39f270d2338a05e8d6d..e9938662ceb43cac900bc55945da7829084033de 100644
--- a/libs/baseclass/save.py
+++ b/libs/baseclass/save.py
@@ -24,14 +24,17 @@
         super(Save, self).__init__(**kwargs)
         Window.bind(on_key_down=self._on_keyboard_down)
 
-        if self.app.batch == '0000':
-            self.ids.image.source = os.path.join('assets', 'demo', 'rgb_raw.png')
-
     def _on_keyboard_down(self, instance, keyboard, keycode, text, modifiers):
         if self.batch.focus and keycode == 40:  # 40 - Enter key pressed
             self.ids.button.trigger_action()
 
     def on_pre_enter(self, *args):
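+        # auto-assign the next batch number from the count of existing batch folders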
+        batch_path = os.path.join('assets', 'trials', self.app.trial['name'].lower())
+        batches = len(os.listdir(batch_path))
+
+        self.save(str(batches))
+
         if 'image' in self.ids:
             self.ids.image.reload()
         if 'batch' in self.ids:
diff --git a/libs/capturing/realsensecapture.py b/libs/capturing/realsensecapture.py
index 16390eec2a17ed741481f26bd8c36c316b4e1a96..fd61de257faa6492fe4bc47b06030bb6d9a35060 100644
--- a/libs/capturing/realsensecapture.py
+++ b/libs/capturing/realsensecapture.py
@@ -19,9 +19,9 @@ class RealsenseCapture:
         self.config = rs.config()
 
         self.color_camera_options = {
-            rs.option.auto_exposure_priority: True,
-            rs.option.enable_auto_exposure: True,
-            # rs.option.exposure: 100,  # microseconds, max 165,000
+            # rs.option.auto_exposure_priority: True,
+            # rs.option.enable_auto_exposure: True,
+            rs.option.exposure: 150,  # microseconds, max 165,000
             rs.option.enable_auto_white_balance: True,
             # rs.option.white_balance: 5400
         }
@@ -159,7 +159,7 @@ class RealsenseCapture:
             # "irR": ir_r
         }
 
-    def get_frames(self, frame_count=1):
+    def get_frames(self, frame_count=60):
         align = rs.align(rs.stream.depth)
 
         p = self.profile.get_stream(rs.stream.depth)
diff --git a/libs/kv/batch.kv b/libs/kv/batch.kv
index 3e8755fb4d4021af52885ab322d924abd68e3556..a182444b73ad08c7931ad2ca2904ffb532b12702 100644
--- a/libs/kv/batch.kv
+++ b/libs/kv/batch.kv
@@ -13,6 +13,7 @@
         orientation: 'horizontal'
 
         MDBoxLayout:
+            id: images
             padding: '12dp'
 
             size_hint: None, None
@@ -27,32 +28,11 @@
                 allow_stretch: True
 
         MDBoxLayout:
+            id: traits
             size_hint: 1, None
             height: 168
             orientation: 'vertical'
 
-            MDLabel:
-                id: peduncle
-                text: ''
-                adaptive_height: True
-
-            MDLabel:
-                id: trait1
-                text: 'Trait 2: Value'
-                adaptive_height: True
-
-            MDLabel:
-                id: trait1
-                text: 'Trait 3: Value'
-                adaptive_height: True
-
-            MDLabel:
-                id: trait1
-                text: 'Trait 4: Value'
-                adaptive_height: True
-
-            Widget:
-
 
 <Batch>
     name: 'Batch'
@@ -60,6 +40,7 @@
     batch: app.batch
 
     MDBoxLayout:
+        size_hint: 1, 1
         orientation: 'vertical'
 
         Toolbar:
@@ -67,7 +48,6 @@
             pos_hint: {'top': 1}
             left_action_items: [['arrow-left', lambda x: app.transition('Trial', app.trial)]]
 
-
         MDBoxLayout:
             size_hint: 1, 1
             orientation: 'horizontal'
@@ -85,44 +65,56 @@
                     allow_stretch: True
                     height: '480dp'
 
-            MDBoxLayout:
-                id: instances
-                size_hint: 0.5, 1
-                padding: '12dp'
-                spacing: '12dp'
-                orientation: 'vertical'
-
 #                ScrollView:
 #                    do_scroll_y: True
 ##
-                MDLabel:
-                    adaptive_height: True
-                    text: 'Facing up'
-
-                MDBoxLayout:
-                    id: up
-                    orientation: 'vertical'
-                    spacing: '12dp'
-                    adaptive_height: True
-
-                MDLabel:
-                    adaptive_height: True
-                    text: 'Facing down'
-
-                MDBoxLayout:
-                    id: down
-                    orientation: 'vertical'
-                    spacing: '12dp'
-                    adaptive_height: True
-
-                MDLabel:
-                    adaptive_height: True
-                    text: 'Facing sideways'
-
-                MDBoxLayout:
-                    id: side
-                    orientation: 'vertical'
-                    spacing: '12dp'
-                    adaptive_height: True
-
-                Widget:
\ No newline at end of file
+
+            MDBoxLayout:
+#                id: instances
+                size_hint: 0.5, 1
+#                padding: '12dp'
+#                spacing: '12dp'
+#                orientation: 'vertical'
+
+                ScrollView:
+                    do_scroll_y: True
+
+                    MDBoxLayout:
+                        id: instances
+                        size_hint: 1, None
+                        padding: '12dp'
+                        spacing: '12dp'
+                        orientation: 'vertical'
+                        height: self.minimum_height
+
+                        MDLabel:
+                            adaptive_height: True
+                            text: 'Facing up'
+
+                        MDBoxLayout:
+                            id: up
+                            orientation: 'vertical'
+                            spacing: '12dp'
+                            adaptive_height: True
+
+                        MDLabel:
+                            adaptive_height: True
+                            text: 'Facing down'
+
+                        MDBoxLayout:
+                            id: down
+                            orientation: 'vertical'
+                            spacing: '12dp'
+                            adaptive_height: True
+
+                        MDLabel:
+                            adaptive_height: True
+                            text: 'Facing sideways'
+
+                        MDBoxLayout:
+                            id: side
+                            orientation: 'vertical'
+                            spacing: '12dp'
+                            adaptive_height: True
+
+                        Widget:
\ No newline at end of file
diff --git a/libs/kv/trial.kv b/libs/kv/trial.kv
index 2bac44ae2c3cde7486ce8f027ed102c71a98dbdd..885584f8b6ae13bb3aed6bef58944c1f18779783 100644
--- a/libs/kv/trial.kv
+++ b/libs/kv/trial.kv
@@ -87,8 +87,8 @@
             pos: root.width / 2 - dp(24), dp(24)
             on_release: app.transition('Snap')
 
-        MDFloatingActionButton:
-            icon: 'plus'
-            elevation: 8
-            pos: dp(24), dp(24)
-            on_release: app.transition('Save', root.trial, '0000')
\ No newline at end of file
+#        MDFloatingActionButton:
+#            icon: 'pencil'
+#            elevation: 8
+#            pos: dp(24), dp(24)
+#            on_release: app.transition('Save', root.trial, '0000')
\ No newline at end of file
diff --git a/libs/vision/models/peduncle/keras_metadata.pb b/libs/vision/models/peduncle.legacy/keras_metadata.pb
similarity index 100%
rename from libs/vision/models/peduncle/keras_metadata.pb
rename to libs/vision/models/peduncle.legacy/keras_metadata.pb
diff --git a/libs/vision/models/peduncle/saved_model.pb b/libs/vision/models/peduncle.legacy/saved_model.pb
similarity index 100%
rename from libs/vision/models/peduncle/saved_model.pb
rename to libs/vision/models/peduncle.legacy/saved_model.pb
diff --git a/libs/vision/models/peduncle/variables/variables.data-00000-of-00001 b/libs/vision/models/peduncle.legacy/variables/variables.data-00000-of-00001
similarity index 100%
rename from libs/vision/models/peduncle/variables/variables.data-00000-of-00001
rename to libs/vision/models/peduncle.legacy/variables/variables.data-00000-of-00001
diff --git a/libs/vision/models/peduncle/variables/variables.index b/libs/vision/models/peduncle.legacy/variables/variables.index
similarity index 100%
rename from libs/vision/models/peduncle/variables/variables.index
rename to libs/vision/models/peduncle.legacy/variables/variables.index
diff --git a/libs/vision/models/peduncle/peduncle.0.pth b/libs/vision/models/peduncle/peduncle.0.pth
new file mode 100755
index 0000000000000000000000000000000000000000..cb56905203c1757377b8980897e05d14cde543da
Binary files /dev/null and b/libs/vision/models/peduncle/peduncle.0.pth differ
diff --git a/libs/vision/models/peduncle/peduncle.pth b/libs/vision/models/peduncle/peduncle.pth
new file mode 100755
index 0000000000000000000000000000000000000000..ac913eeda908c4efa6574166f64e32f383665312
Binary files /dev/null and b/libs/vision/models/peduncle/peduncle.pth differ
diff --git a/libs/vision/models/segmenter/segmentation.1.pth b/libs/vision/models/segmenter/segmentation.1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d1844fa91e6b8303b7cc38d637740ec608b13c84
Binary files /dev/null and b/libs/vision/models/segmenter/segmentation.1.pth differ
diff --git a/libs/vision/models/segmenter/segmentation.pth b/libs/vision/models/segmenter/segmentation.pth
old mode 100644
new mode 100755
index d1844fa91e6b8303b7cc38d637740ec608b13c84..1e6b0e760d8183d59768ae8ce96f61e676d4a6ac
Binary files a/libs/vision/models/segmenter/segmentation.pth and b/libs/vision/models/segmenter/segmentation.pth differ
diff --git a/libs/vision/peduncle.ann.py b/libs/vision/peduncle.ann.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbcd1a7b632a79f6cd4a1314751ddb375357adaa
--- /dev/null
+++ b/libs/vision/peduncle.ann.py
@@ -0,0 +1,221 @@
+import json
+import os
+import numpy as np
+import cv2
+import pickle
+
+from keras.models import load_model
+from skimage.segmentation import flood_fill
+
+
+class ANNMaskerInfer:
+    
+    modelDict = None
+
+    def __init__(self):
+
+        model_path = os.path.join(os.path.dirname(__file__), 'models')
+        with open(os.path.join(model_path, 'modelDict.json'), 'rb') as f:
+            self.modelDict = json.load(f)
+            print(self.modelDict)
+
+        if '.pkl' in self.modelDict['model']:
+            self.model = pickle.load(open(os.path.join(model_path, self.modelDict['model']), 'rb'))
+        else:
+            print(model_path)
+            self.model = load_model(os.path.join(model_path, self.modelDict['model']))
+            dir(self.model)
+
+        self.MeanVec = self.modelDict['mean vec']
+        self.MaxVec = self.modelDict['max vec']
+
+        if self.modelDict['context'] is not None:
+            self.contextType = self.modelDict['context']['type']
+            self.contextPad = self.modelDict['context']['pad']
+            self.contextSigma = self.modelDict['context']['sigma']
+            self.contextMC = self.modelDict['context']['multiple circles']
+        else:
+            self.contextType = None
+
+    def contextualize(self, data):
+        data = cv2.copyMakeBorder(data, self.contextPad, self.contextPad,
+                                  self.contextPad, self.contextPad,
+                                  borderType=cv2.BORDER_REPLICATE)
+
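+        # ring-shaped Gaussian kernel: weights peak at radius ~offset/2.2 from the centre,
+        # so each pixel is augmented with colour context from a surrounding annulus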
+        sigma = self.contextSigma
+        offset = self.contextPad
+        Kernel = np.zeros([2 * offset + 1, 2 * offset + 1])
+        for i in range(Kernel.shape[0]):
+            for j in range(Kernel.shape[1]):
+                Kernel[i, j] = np.exp(
+                    -(1 / (2 * sigma ** 2)) * (np.sqrt((i - offset) ** 2 + (j - offset) ** 2) - offset / 2.2) ** 2)
+        Kernel /= np.sum(Kernel)
+
+        if self.contextType == 'cross':
+            if self.contextMC == False:
+                KernelUp = np.zeros([2 * offset + 1, 2 * offset + 1])
+
+                for i in range(Kernel.shape[0]):
+                    for j in range(Kernel.shape[1]):
+                        v1 = [j - offset, offset - i]
+                        uv2 = [0, 1]
+                        if (v1 != [0, 0]):
+                            uv1 = v1 / np.linalg.norm(v1)
+                            angle = np.arccos(np.dot(uv1, uv2))
+                            KernelUp[i, j] = np.exp(-(1 / (2 * 0.1 ** 2)) * (angle / np.pi) ** 2)
+
+                KernelUp *= Kernel
+                KernelUp /= np.sum(KernelUp)
+                KernelLeft = np.rot90(KernelUp)
+                KernelDown = np.rot90(KernelLeft)
+                KernelRight = np.rot90(KernelDown)
+
+                data_ = np.zeros([data.shape[0], data.shape[1], 15])
+                data_[:, :, :3] = data
+                data_[:, :, 3:6] = cv2.filter2D(data, -1, KernelUp)
+                data_[:, :, 6:9] = cv2.filter2D(data, -1, KernelRight)
+                data_[:, :, 9:12] = cv2.filter2D(data, -1, KernelDown)
+                data_[:, :, 12:15] = cv2.filter2D(data, -1, KernelLeft)
+
+            elif self.contextMC is not False:
+                mc = len(self.contextMC)
+                offsets = np.array(self.contextMC)
+                sigmas = sigma * offsets / np.max(offsets)
+
+                data_ = np.zeros([data.shape[0], data.shape[1], 3 * (1 + 4 * mc)])
+                data_[:, :, :3] = data
+
+                Kernels = [[]] * 4 * mc
+
+                for k in range(mc):
+                    Kernel = np.zeros([2 * offset + 1, 2 * offset + 1])
+
+                    for i in range(Kernel.shape[0]):
+                        for j in range(Kernel.shape[1]):
+                            Kernel[i, j] = np.exp(-(1 / (2 * sigmas[k] ** 2)) * (
+                                    np.sqrt((i - offset) ** 2 + (j - offset) ** 2) - offsets[k]) ** 2)
+
+                    KernelUp = np.zeros([2 * offset + 1, 2 * offset + 1])
+
+                    for i in range(Kernel.shape[0]):
+                        for j in range(Kernel.shape[1]):
+                            v1 = [j - offset, offset - i]
+                            uv2 = [0, 1]
+                            if (v1 != [0, 0]):
+                                uv1 = v1 / np.linalg.norm(v1)
+                                angle = np.arccos(np.dot(uv1, uv2))
+                                KernelUp[i, j] = np.exp(-(1 / (2 * 0.1 ** 2)) * (angle / np.pi) ** 2)
+
+                    KernelUp *= Kernel
+                    KernelUp /= np.sum(KernelUp)
+                    KernelLeft = np.rot90(KernelUp)
+                    KernelDown = np.rot90(KernelLeft)
+                    KernelRight = np.rot90(KernelDown)
+
+                    data_[:, :, (3 + 12 * k):(6 + 12 * k)] = cv2.filter2D(data, -1, KernelUp)
+                    data_[:, :, (6 + 12 * k):(9 + 12 * k)] = cv2.filter2D(data, -1, KernelRight)
+                    data_[:, :, (9 + 12 * k):(12 + 12 * k)] = cv2.filter2D(data, -1, KernelDown)
+                    data_[:, :, (12 + 12 * k):(15 + 12 * k)] = cv2.filter2D(data, -1, KernelLeft)
+
+
+        elif self.contextType == 'circle':
+            if self.contextMC == False:
+                data_ = np.zeros([data.shape[0], data.shape[1], 6])
+                data_[:, :, :3] = data
+                data_[:, :, 3:6] = cv2.filter2D(data, -1, Kernel)
+
+            elif self.contextMC is not False:
+                mc = len(self.contextMC)
+                offsets = np.array(self.contextMC)
+                sigmas = sigma * offsets / np.max(offsets)
+
+                data_ = np.zeros([data.shape[0], data.shape[1], 3 * (1 + mc)])
+                data_[:, :, :3] = data
+
+                Kernels = [[]] * mc
+                for k in range(mc):
+                    Kernels[k] = np.zeros([2 * offset + 1, 2 * offset + 1])
+
+                    for i in range(Kernel.shape[0]):
+                        for j in range(Kernel.shape[1]):
+                            Kernels[k][i, j] = np.exp(-(1 / (2 * sigmas[k] ** 2)) * (
+                                    np.sqrt((i - offset) ** 2 + (j - offset) ** 2) - offsets[k]) ** 2)
+                    Kernels[k] /= np.sum(Kernels[k])
+                    data_[:, :, (3 * (1 + k)):(3 * (2 + k))] = cv2.filter2D(data, -1, Kernels[k])
+
+        data = data_[offset:-offset, offset:-offset, :]
+
+        return data
+
+    def RemoveSmallClusters(self, mask, MinSize):
+        NewMask = np.zeros([mask.shape[0], mask.shape[1]])
+        # Find all the pixel clusters in the Carrots mask
+        Clusters = cv2.connectedComponentsWithStats(np.uint8(mask), 8, cv2.CV_32S)
+        Clusters = list(Clusters)
+
+        # Find only clusters with a minimum size (SizeThreshold)
+        ObjectIdx = np.where(Clusters[2][1:, 4] > MinSize)[0] + 1
+        if len(ObjectIdx) > 0:
+            for idx in ObjectIdx:
+                NewMask += Clusters[1] == idx
+        else:
+            idx = np.argmax(Clusters[2][1:, 4]) + 1
+            NewMask += Clusters[1] == idx
+
+        return (NewMask == 1)
+
+    def preprocess(self, imRGB):
+        self.imRGB = imRGB
+        data = np.float64(self.imRGB)
+        data /= np.mean(data[data != 0]) / 80
+        data[data > 255] = 255
+        data = np.uint8(data)
+
+        data = np.float64(cv2.cvtColor(np.uint8(data), cv2.COLOR_RGB2HSV))
+        if self.contextType is not None:
+            data = self.contextualize(data)
+
+        data_reshaped = data.reshape([np.prod(data.shape[:2]), data.shape[2]])
+        data_reshaped -= self.MeanVec
+        data_reshaped /= self.MaxVec
+
+        self.data_reshaped = data_reshaped
+
+    def infer(self):
+        if '.pkl' in self.modelDict['model']:
+            mask_reshaped_proba = self.model.predict_proba(self.data_reshaped)[:, 1]
+        else:
+            mask_reshaped_proba = self.model.predict(self.data_reshaped)[:, 1]
+
+        self.mask_proba = mask_reshaped_proba.reshape(self.imRGB.shape[:2])
+        self.mask = self.mask_proba > 0.5
+
+    def postprocess(self):
+        Clusters = cv2.connectedComponentsWithStats(np.uint8(self.mask), 8, cv2.CV_32S)
+        Clusters = list(Clusters)
+
+        if Clusters[0] > 2:
+            Scores = np.zeros(Clusters[0] - 1)
+            for i in range(1, Clusters[0]):
+                cluster_mask_proba = ((Clusters[1] == i) * self.mask_proba)
+                Scores[i - 1] = np.mean(cluster_mask_proba[cluster_mask_proba != 0])
+
+            if np.sum(Scores == np.max(Scores)) != 1:  # Check if multiple clusters have same score
+                VectorLengths = np.zeros(np.sum(Scores == np.max(Scores)))
+                for i in range(np.sum(Scores == np.max(Scores))):
+                    Vector = Clusters[3][i + 1, :] - np.array(self.mask.shape, dtype='float64') / 2
+                    VectorLengths[i] = np.sqrt(np.sum(Vector ** 2))
+                self.mask = Clusters[1] == (np.argmin(VectorLengths) + 1)
+            else:
+                self.mask = Clusters[1] == (np.argmax(Scores) + 1)
+
+        Kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
+        self.mask = cv2.dilate(np.uint8(self.mask), kernel=Kernel)
+        self.mask = cv2.erode(self.mask, kernel=Kernel)
+
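+        # fill internal holes: flood-fill the background from a corner; any pixel still
+        # zero afterwards lies inside the mask and is added back in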
+        im = flood_fill(self.mask, (0, 0), 1)
+        self.mask += np.uint8(im == 0)
diff --git a/libs/vision/peduncle.py b/libs/vision/peduncle.py
index dbcd1a7b632a79f6cd4a1314751ddb375357adaa..57a0bed26e026ac931c3a2056ba83d5ec3811c1c 100644
--- a/libs/vision/peduncle.py
+++ b/libs/vision/peduncle.py
@@ -1,217 +1,86 @@
-import json
 import os
-import numpy as np
-import cv2
-import pickle
+import time
+import torchvision
+
+from detectron2.utils.logger import setup_logger
+
+setup_logger()
+from detectron2 import model_zoo
+from detectron2.engine import DefaultPredictor
+from detectron2.config import get_cfg
+from detectron2.utils.visualizer import Visualizer
+from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader
+from detectron2.data.datasets import register_coco_instances
+from detectron2.engine import DefaultTrainer
+from detectron2.evaluation import COCOEvaluator, inference_on_dataset
+from detectron2.modeling import build_model
 
-from keras.models import load_model
 from skimage.segmentation import flood_fill
 
 
-class ANNMaskerInfer:
-    
-    modelDict = None
+WEIGHTSFOLDER = os.path.join(os.path.dirname(__file__), 'models', 'peduncle')
 
-    def __init__(self):
 
-        model_path = os.path.join(os.path.dirname(__file__), 'models')
-        with open(os.path.join(model_path, 'modelDict.json'), 'rb') as f:
-            self.modelDict = json.load(f)
-            print(self.modelDict)
+class PeduncleSegmentron():
+    def __init__(self) -> None:
+        print('initialising detectron')
+        config_file = 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml'
+        classes = ['Peduncle scar']
+        input_size = 160
 
-        if '.pkl' in self.modelDict['model']:
-            self.model = pickle.load(open(os.path.join(model_path, self.modelDict['model'], 'rb')))
-        else:
-            print(model_path)
-            self.model = load_model(os.path.join(model_path, self.modelDict['model']))
-            dir(self.model)
-
-        self.MeanVec = self.modelDict['mean vec']
-        self.MaxVec = self.modelDict['max vec']
-
-        if self.modelDict['context'] is not None:
-            self.contextType = self.modelDict['context']['type']
-            self.contextPad = self.modelDict['context']['pad']
-            self.contextSigma = self.modelDict['context']['sigma']
-            self.contextMC = self.modelDict['context']['multiple circles']
-        else:
-            self.contextType = None
-
-    def contextualize(self, data):
-        data = cv2.copyMakeBorder(data, self.contextPad, self.contextPad,
-                                  self.contextPad, self.contextPad,
-                                  borderType=cv2.BORDER_REPLICATE)
-
-        sigma = self.contextSigma
-        offset = self.contextPad
-        Kernel = np.zeros([2 * offset + 1, 2 * offset + 1])
-        for i in range(Kernel.shape[0]):
-            for j in range(Kernel.shape[1]):
-                Kernel[i, j] = np.exp(
-                    -(1 / (2 * sigma ** 2)) * (np.sqrt((i - offset) ** 2 + (j - offset) ** 2) - offset / 2.2) ** 2)
-        Kernel /= np.sum(Kernel)
-
-        if self.contextType == 'cross':
-            if self.contextMC == False:
-                KernelUp = np.zeros([2 * offset + 1, 2 * offset + 1])
-
-                for i in range(Kernel.shape[0]):
-                    for j in range(Kernel.shape[1]):
-                        v1 = [j - offset, offset - i]
-                        uv2 = [0, 1]
-                        if (v1 != [0, 0]):
-                            uv1 = v1 / np.linalg.norm(v1)
-                            angle = np.arccos(np.dot(uv1, uv2))
-                            KernelUp[i, j] = np.exp(-(1 / (2 * 0.1 ** 2)) * (angle / np.pi) ** 2)
-
-                KernelUp *= Kernel
-                KernelUp /= np.sum(KernelUp)
-                KernelLeft = np.rot90(KernelUp)
-                KernelDown = np.rot90(KernelLeft)
-                KernelRight = np.rot90(KernelDown)
-
-                data_ = np.zeros([data.shape[0], data.shape[1], 15])
-                data_[:, :, :3] = data
-                data_[:, :, 3:6] = cv2.filter2D(data, -1, KernelUp)
-                data_[:, :, 6:9] = cv2.filter2D(data, -1, KernelRight)
-                data_[:, :, 9:12] = cv2.filter2D(data, -1, KernelDown)
-                data_[:, :, 12:15] = cv2.filter2D(data, -1, KernelLeft)
-
-            elif self.contextMC is not False:
-                mc = len(self.contextMC)
-                offsets = np.array(self.contextMC)
-                sigmas = sigma * offsets / np.max(offsets)
-
-                data_ = np.zeros([data.shape[0], data.shape[1], 3 * (1 + 4 * mc)])
-                data_[:, :, :3] = data
-
-                Kernels = [[]] * 4 * mc
-
-                for k in range(mc):
-                    Kernel = np.zeros([2 * offset + 1, 2 * offset + 1])
-
-                    for i in range(Kernel.shape[0]):
-                        for j in range(Kernel.shape[1]):
-                            Kernel[i, j] = np.exp(-(1 / (2 * sigmas[k] ** 2)) * (
-                                    np.sqrt((i - offset) ** 2 + (j - offset) ** 2) - offsets[k]) ** 2)
-
-                    KernelUp = np.zeros([2 * offset + 1, 2 * offset + 1])
-
-                    for i in range(Kernel.shape[0]):
-                        for j in range(Kernel.shape[1]):
-                            v1 = [j - offset, offset - i]
-                            uv2 = [0, 1]
-                            if (v1 != [0, 0]):
-                                uv1 = v1 / np.linalg.norm(v1)
-                                angle = np.arccos(np.dot(uv1, uv2))
-                                KernelUp[i, j] = np.exp(-(1 / (2 * 0.1 ** 2)) * (angle / np.pi) ** 2)
-
-                    KernelUp *= Kernel
-                    KernelUp /= np.sum(KernelUp)
-                    KernelLeft = np.rot90(KernelUp)
-                    KernelDown = np.rot90(KernelLeft)
-                    KernelRight = np.rot90(KernelDown)
-
-                    data_[:, :, (3 + 12 * k):(6 + 12 * k)] = cv2.filter2D(data, -1, KernelUp)
-                    data_[:, :, (6 + 12 * k):(9 + 12 * k)] = cv2.filter2D(data, -1, KernelRight)
-                    data_[:, :, (9 + 12 * k):(12 + 12 * k)] = cv2.filter2D(data, -1, KernelDown)
-                    data_[:, :, (12 + 12 * k):(15 + 12 * k)] = cv2.filter2D(data, -1, KernelLeft)
-
-
-        elif self.contextType == 'circle':
-            if self.contextMC == False:
-                data_ = np.zeros([data.shape[0], data.shape[1], 6])
-                data_[:, :, :3] = data
-                data_[:, :, 3:6] = cv2.filter2D(data, -1, Kernel)
-
-            elif self.contextMC is not False:
-                mc = len(self.contextMC)
-                offsets = np.array(self.contextMC)
-                sigmas = sigma * offsets / np.max(offsets)
-
-                data_ = np.zeros([data.shape[0], data.shape[1], 3 * (1 + mc)])
-                data_[:, :, :3] = data
-
-                Kernels = [[]] * mc
-                for k in range(mc):
-                    Kernels[k] = np.zeros([2 * offset + 1, 2 * offset + 1])
-
-                    for i in range(Kernel.shape[0]):
-                        for j in range(Kernel.shape[1]):
-                            Kernels[k][i, j] = np.exp(-(1 / (2 * sigmas[k] ** 2)) * (
-                                    np.sqrt((i - offset) ** 2 + (j - offset) ** 2) - offsets[k]) ** 2)
-                    Kernels[k] /= np.sum(Kernels[k])
-                    data_[:, :, (3 * (1 + k)):(3 * (2 + k))] = cv2.filter2D(data, -1, Kernels[k])
-
-        data = data_[offset:-offset, offset:-offset, :]
-
-        return data
-
-    def RemoveSmallClusters(self, mask, MinSize):
-        NewMask = np.zeros([mask.shape[0], mask.shape[1]])
-        # Find all the pixel clusters in the Carrots mask
-        Clusters = cv2.connectedComponentsWithStats(np.uint8(mask), 8, cv2.CV_32S)
-        Clusters = list(Clusters)
-
-        # Find only clusters with a minimum size (SizeThreshold)
-        ObjectIdx = np.where(Clusters[2][1:, 4] > MinSize)[0] + 1
-        if len(ObjectIdx) > 0:
-            for idx in ObjectIdx:
-                NewMask += Clusters[1] == idx
-        else:
-            idx = np.argmax(Clusters[2][1:, 4]) + 1
-            NewMask += Clusters[1] == idx
+        cfg = get_cfg()
+        cfg.merge_from_file(model_zoo.get_config_file(config_file))
+        # cfg.DATASETS.TRAIN = ("train",)
+        # cfg.DATASETS.TEST = ("val",)
+
+        cfg.NUM_GPUS = 1
+        cfg.DATALOADER.NUM_WORKERS = 2
+        cfg.MODEL.WEIGHTS = os.path.join(WEIGHTSFOLDER, 'peduncle.pth')
+        cfg.MODEL.DEVICE = 'cpu'
+        cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(classes)
+
+        cfg.INPUT.MAX_SIZE_TRAIN = input_size
+        cfg.INPUT.MAX_SIZE_TEST = input_size
+
+        cfg.INPUT.MIN_SIZE_TRAIN = input_size
+        cfg.INPUT.MIN_SIZE_TEST = input_size
+
+        cfg.OUTPUT_DIR = WEIGHTSFOLDER
+        os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
+
+
+        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set the testing threshold for this model
+        cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
+
+        self.iou_th = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
+
+        print('initialising predictor')
+        self.predictor = DefaultPredictor(cfg)
+        print('detectron network loaded')
 
-        return (NewMask == 1)
+    def process_single(self, img):
+        """Processing a single image, input is str file_name"""
+        t1 = time.time()
+        outputs = self.predictor(img)
 
-    def preprocess(self, imRGB):
-        self.imRGB = imRGB
-        data = np.float64(self.imRGB)
-        data /= np.mean(data[data != 0]) / 80
-        data[data > 255] = 255
-        data = np.uint8(data)
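+        # extra class-agnostic NMS pass over the predicted boxes, reusing the configured
+        # IoU threshold, to drop any remaining overlapping detections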
+        outputs['instances'] = outputs['instances'][
+            torchvision.ops.nms(outputs['instances'].pred_boxes.tensor, outputs['instances'].scores, self.iou_th)]
 
-        data = np.float64(cv2.cvtColor(np.uint8(data), cv2.COLOR_RGB2HSV))
-        if self.contextType is not None:
-            data = self.contextualize(data)
+        # print(outputs['instances'].pred_masks[0,:,:].numpy())
+        # print(outputs["instances"])
 
-        data_reshaped = data.reshape([np.prod(data.shape[:2]), data.shape[2]])
-        data_reshaped -= self.MeanVec
-        data_reshaped /= self.MaxVec
+        # print(time.time() - t1)
+        # visualizer = Visualizer(img[:, :, ::-1], metadata=test_metadata, scale=0.8)
+        # vis = visualizer.draw_instance_predictions(outputs["instances"].to("cpu"))
+        # bgr_output = vis.get_image()[:, :, ::-1]
 
-        self.data_reshaped = data_reshaped
+        # return outputs, bgr_output
+        m = outputs['instances'].pred_masks.cpu()
 
-    def infer(self):
-        if '.pkl' in self.modelDict['model']:
-            mask_reshaped_proba = self.model.predict_proba(self.data_reshaped)[:, 1]
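+        # instances are score-sorted, so the first mask is the most confident detection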
+        if m.shape[0] != 0:
+            return m[0, :, :]
         else:
-            mask_reshaped_proba = self.model.predict(self.data_reshaped)[:, 1]
-
-        self.mask_proba = mask_reshaped_proba.reshape(self.imRGB.shape[:2])
-        self.mask = self.mask_proba > 0.5
-
-    def postprocess(self):
-        Clusters = cv2.connectedComponentsWithStats(np.uint8(self.mask), 8, cv2.CV_32S)
-        Clusters = list(Clusters)
-
-        if Clusters[0] > 2:
-            Scores = np.zeros(Clusters[0] - 1)
-            for i in range(1, Clusters[0]):
-                cluster_mask_proba = ((Clusters[1] == i) * self.mask_proba)
-                Scores[i - 1] = np.mean(cluster_mask_proba[cluster_mask_proba != 0])
-
-            if np.sum(Scores == np.max(Scores)) != 1:  # Check if multiple clusters have same score
-                VectorLengths = np.zeros(np.sum(Scores == np.max(Scores)))
-                for i in range(np.sum(Scores == np.max(Scores))):
-                    Vector = Clusters[3][i + 1, :] - np.array(self.mask.shape, dtype='float64') / 2
-                    VectorLengths[i] = np.sqrt(np.sum(Vector ** 2))
-                self.mask = Clusters[1] == (np.argmin(VectorLengths) + 1)
-            else:
-                self.mask = Clusters[1] == (np.argmax(Scores) + 1)
-
-        Kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
-        self.mask = cv2.dilate(np.uint8(self.mask), kernel=Kernel)
-        self.mask = cv2.erode(self.mask, kernel=Kernel)
-
-        im = flood_fill(self.mask, (0, 0), 1)
-        self.mask += np.uint8(im == 0)
+            return None
\ No newline at end of file
diff --git a/libs/vision/segmenter.py b/libs/vision/segmenter.py
index 9196d974086480ab170dd17746b93429c886dfcf..61cf30f581facb361238839105547575de6378af 100755
--- a/libs/vision/segmenter.py
+++ b/libs/vision/segmenter.py
@@ -8,6 +8,8 @@ import csv
 import random
 import operator
 from collections import OrderedDict
+
+import torchvision
 from tqdm import tqdm
 import time
 from itertools import groupby
@@ -87,6 +89,8 @@ class Segmentron():
         cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set the testing threshold for this model
         cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
 
+        self.iou_th = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
+
         print('initialising predictor')
         self.predictor = DefaultPredictor(cfg)
         print('detectron network loaded')
@@ -98,15 +102,20 @@
         t1 = time.time()
         outputs = self.predictor(img)
 
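+        # class-agnostic NMS with the configured IoU threshold to drop duplicate detections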
+        outputs['instances'] = outputs['instances'][
+            torchvision.ops.nms(outputs['instances'].pred_boxes.tensor, outputs['instances'].scores, self.iou_th)]
+
         # print(outputs['instances'].pred_masks[0,:,:].numpy())
         # print(outputs["instances"])
 
         print(time.time() - t1)
-        visualizer = Visualizer(img[:, :, ::-1], metadata=test_metadata, scale=0.8)
-        vis = visualizer.draw_instance_predictions(outputs["instances"].to("cpu"))
-        bgr_output = vis.get_image()[:, :, ::-1]
+        # visualizer = Visualizer(img[:, :, ::-1], metadata=test_metadata, scale=0.8)
+        # vis = visualizer.draw_instance_predictions(outputs["instances"].to("cpu"))
+        # bgr_output = vis.get_image()[:, :, ::-1]
 
-        return outputs, bgr_output
+        # return outputs, bgr_output
+        return outputs
 
 
 if __name__ == '__main__':
diff --git a/libs/vision/test/D415_test1_1_single_rgb_raw.png b/libs/vision/test/D415_test1_1_single_rgb_raw.png
new file mode 100755
index 0000000000000000000000000000000000000000..1c9bda261d64e7b436a544296efc340d33d22493
Binary files /dev/null and b/libs/vision/test/D415_test1_1_single_rgb_raw.png differ
diff --git a/libs/vision/tray.py b/libs/vision/tray.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2a57a97acad5bd9b603906e42b68f7e9cb8b498
--- /dev/null
+++ b/libs/vision/tray.py
@@ -0,0 +1,170 @@
+import os
+import time
+
+import cv2
+
+import numpy as np
+
+from detectron2.engine import DefaultPredictor
+from detectron2.config import get_cfg
+from detectron2 import model_zoo
+
+from skimage.segmentation import flood_fill
+from matplotlib import pyplot as plt
+
+WEIGHTSFOLDER = os.path.join(os.path.dirname(__file__), 'models', 'segmenter')
+
+
+class FruitSegmentron():
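+    # per-image results, populated by process_single()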
+    Coordinates = None
+    Masks = []
+
+    def __init__(self) -> None:
+        print('initialising detectron')
+        config_file = 'COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml'
+        classes = ['Tomato fruit']
+
+        cfg = get_cfg()
+        cfg.merge_from_file(model_zoo.get_config_file(config_file))
+        # cfg.DATASETS.TRAIN = ("train",)
+        # cfg.DATASETS.TEST = ("val",)
+
+        cfg.NUM_GPUS = 1
+        cfg.DATALOADER.NUM_WORKERS = 2
+        cfg.MODEL.WEIGHTS = os.path.join(WEIGHTSFOLDER, 'segmentation.pth')
+        cfg.MODEL.DEVICE = 'cpu'
+        cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(classes)
+
+        cfg.INPUT.MAX_SIZE_TRAIN = 1280
+        cfg.INPUT.MAX_SIZE_TEST = 1280
+
+        cfg.INPUT.MIN_SIZE_TRAIN = 720
+        cfg.INPUT.MIN_SIZE_TEST = 720
+
+        cfg.OUTPUT_DIR = WEIGHTSFOLDER
+        os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
+
+        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set the testing threshold for this model
+        cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
+
+        self.iou_th = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST  # kept for parity with Segmentron; unused here
+
+        print('initialising predictor')
+        self.predictor = DefaultPredictor(cfg)
+        print('detectron network loaded')
+
+    def process_single(self, file_name):
+        """Processing a single image, input is str file_name"""
+
+        img = cv2.imread(file_name)
+        t1 = time.time()
+        outputs = self.predictor(img)
+        print(time.time() - t1)
+
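+        # self.m is a single label image: pixels of instance i get the value i + 1;
+        # dim is the fixed output size of the per-fruit crops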
+        self.m = np.zeros(outputs['instances'].pred_masks.cpu().shape[1:])
+        dim = (160, 160)
+
+        self.Masks = outputs['instances'].pred_masks.cpu().numpy()
+
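+        # one row of connectedComponentsWithStats output (x, y, w, h, area) per
+        # detection; assumes each predicted mask is a single blob (component 1)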
+        self.Coordinates = np.zeros([len(outputs['instances']), 5])
+        for i in range(len(outputs['instances'])):
+            m_ = np.asarray(outputs['instances'].pred_masks.cpu()[i, :, :])
+            self.m += (i + 1) * m_
+
+            # self.Masks.append(outputs['instances'].pred_masks.numpy()[i])
+
+            Cluster = cv2.connectedComponentsWithStats(np.uint8(m_), 8, cv2.CV_32S)
+            self.Coordinates[i, :] = Cluster[2][1, :]
+
+        imRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        imRGB_masked = cv2.bitwise_and(imRGB, imRGB, mask=np.uint8(self.m != 0))
+        self.process_coordinates()
+
+        count_up, count_down, count_side = 0, 0, 0
+        ResultsDict = {'Images': [], 'Keys': [], 'Coordinates': [], 'Masks': self.Masks}
+        # ResultsDict = {'Images': [], 'Keys': [], 'Coordinates': [], 'Masks': []}
+
+        for i in range(self.Coordinates.shape[0]):
+            # pad the box by 5 px on each side (clamped at 0 so the uint16
+            # coordinates cannot underflow for detections touching the border)
+            C = np.zeros(4, dtype=np.uint16)
+            C[0] = max(self.Coordinates[i, 0] - 5, 0)
+            C[1] = max(self.Coordinates[i, 1] - 5, 0)
+            C[2] = self.Coordinates[i, 2] + 10
+            C[3] = self.Coordinates[i, 3] + 10
+
+            # ResultsDict['Masks'].append(np.uint8(self.m[(C[1] + 5):(C[3] - 10 + C[1] + 5), (C[0] + 5):(C[2] - 10 + C[0] + 5)]) != 0)
+
+            im_c = imRGB_masked[C[1]:(C[1] + C[3]), C[0]:(C[0] + C[2]), :]
+            m_c = self.m[C[1]:(C[1] + C[3]), C[0]:(C[0] + C[2])]  # mask crop (currently unused)
+
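+            # RS/CS: row/column offsets that centre the crop on a fixed black
+            # canvas; negative offsets mean the crop is larger than the canvas
+            # and gets trimmed instead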
+            RS = int((dim[0] - im_c.shape[0]) / 2)
+            CS = int((dim[0] - im_c.shape[1]) / 2)
+
+            im_ = np.zeros([dim[0], dim[1], 3], dtype=np.uint8)
+
+            if (RS > 0) & (CS > 0):
+                im_[RS:(RS + im_c.shape[0]), CS:(CS + im_c.shape[1]), :] = im_c
+            elif (RS <= 0) & (CS > 0):
+                im_[:, CS:(CS + im_c.shape[1]), :] = im_c[abs(RS):(abs(RS) + 160), :, :]
+            elif (RS > 0) & (CS <= 0):
+                im_[RS:(RS + im_c.shape[0]), :, :] = im_c[:, abs(CS):(abs(CS) + 160), :]
+            elif (RS <= 0) & (CS <= 0):
+                im_ = im_c[abs(RS):(abs(RS) + 160), abs(CS):(abs(CS) + 160), :]
+
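+            # column 5 holds the tray-row code assigned in process_coordinates()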
+            if self.Coordinates[i, 5] == 0:
+                ResultsDict['Keys'].append('up_' + str(count_up))
+                count_up += 1
+            elif self.Coordinates[i, 5] == 1:
+                ResultsDict['Keys'].append('side_' + str(count_side))
+                count_side += 1
+            elif self.Coordinates[i, 5] == 2:
+                ResultsDict['Keys'].append('down_' + str(count_down))
+                count_down += 1
+
+            ResultsDict['Images'].append(im_)
+            ResultsDict['Coordinates'].append(C)
+
+        return ResultsDict
+
+    def process_coordinates(self):
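+        """Sort detections into tray order: left-to-right by x, then
+        top-to-bottom within each column of three fruits."""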
+        index = np.argsort(self.Coordinates[:, 0])
+        print(index)
+        self.Coordinates = self.Coordinates[index, :]
+        self.Masks = self.Masks[index]
+
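+        # group boundaries: every three consecutive detections (by x) form one tray column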
+        Rows = np.linspace(0,
+                           self.Coordinates.shape[0],
+                           int(self.Coordinates.shape[0] / 3 + 1),
+                           dtype=np.uint8)
+
+        for i in range(len(Rows) - 1):
+            row_index = np.argsort(self.Coordinates[Rows[i]:Rows[i + 1], 1])
+            self.Coordinates[Rows[i]:Rows[i + 1], :] = self.Coordinates[
+                                                       row_index + Rows[i],
+                                                       :]
+            self.Masks[i * 3: i * 3 + 3] = self.Masks[i * 3: i * 3 + 3][row_index]
+
+        # append an orientation code from tray position (0 = up, 1 = side, 2 = down),
+        # assuming the detections come in repeating columns of three
+        self.Coordinates = np.c_[self.Coordinates, [0, 1, 2] * int(self.Coordinates.shape[0] / 3)]
+
+
+if __name__ == '__main__':
+    file = os.path.join(os.path.dirname(__file__), 'test', 'D415_test1_1_single_rgb_raw.png')
+
+    segmenter = FruitSegmentron()
+    tomato_list = segmenter.process_single(file)
+
+    tomatoes = []
+
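+    # build one record per detection; orientation is recovered from the key prefix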
+    for idx, key in enumerate(tomato_list['Keys']):
+        orientation = key.split('_')[0]
+        tomato = {
+            'id': idx + 1,
+            'properties': {
+                'orientation': orientation,
+            },
+            'traits': {}
+        }
+
+        tomatoes.append(tomato)
+
+    print(tomatoes)
diff --git a/main.py b/main.py
index 4e18dcc8c9b0b2909b9fbfe00fa11ca3291b3062..485004b0170db3bc7543e6992ed2ccebbfcb281d 100644
--- a/main.py
+++ b/main.py
@@ -23,7 +23,7 @@ os.environ["INVITE_ASSETS"] = os.path.join(
 Config.set('kivy', 'keyboard_mode', 'systemanddock')
 Window.softinput_mode = "below_target"
 
-Window.maximize()
+# Window.maximize()
 
 
 class InviteApp(MDApp):
@@ -48,6 +48,7 @@ class InviteApp(MDApp):
         camera = self.ids['camera']
 
     def transition(self, screen, trial=None, batch=None):
+        # print(screen, trial, batch)
         manager = self.root.ids.screen_manager
         if trial:
             self.trial = trial