diff --git a/image_operations.py b/image_operations.py
index 916d40211449a8d542125ba64748a74b774ed283..820ea7e94e388f9f80cb6cc8bba8adde70ccaf1b 100755
--- a/image_operations.py
+++ b/image_operations.py
@@ -721,6 +721,142 @@ def display_outputs(file_name, img_output, img_output_post=None, show_post_proce
     plt.show(block=True)
 
 
+def grab_objects(img, preds, cfg):
+    """Crop each predicted instance out of `img` and return the crops
+    together with their pixel offsets in the source image."""
+    # if cfg.operation == "object_detection":
+    #     for p in preds:
+    #         cl = p.cl - 1
+    #         b = p.box
+    #         # b = bops.xywh2xyxy(b)
+
+    #         class_name = cfg.class_names[cl]
+    #         b = list(map(int, b))
+    #         x1, x2, y1, y2 = b
+
+    #         if cfg.crop_style == "fixed":
+    #             cs = int(cfg.crop_size/2)
+    #             mx = int((x2+x1)/2)
+    #             my = int((y2+y1)/2)
+
+    #             cx1 = max(0, int(mx - cs) - cfg.crop_pad)
+    #             cy1 = max(0, int(my - cs) - cfg.crop_pad)
+    #             cx2 = min(img.shape[1], int(mx + cs) + cfg.crop_pad)
+    #             cy2 = min(img.shape[0], int(my + cs) + cfg.crop_pad)
+
+    #             cropped_img = img[cy1:cy2, cx1:cx2]
+
+    #             if cropped_img.shape[0] == cs*2 and cropped_img.shape[1] == cs*2:
+    #                 cropped_filename = os.path.basename(file_name)
+    #                 cropped_filename = os.path.splitext(cropped_filename)[0]
+    #                 cropped_filename = "{}_{}_{}.png".format(cropped_filename, cx1, cy1)
+    #                 cv2.imwrite("{}/{}/{}".format(cfg.experiment_folder_models_target_model_ver_weights_cropped, class_name, cropped_filename), cropped_img)
+
+    #         elif cfg.crop_style == "predicted":
+    #             cx1 = x1
+    #             cy1 = y1
+    #             cx2 = x2
+    #             cy2 = y2
+
+    #             if cfg.crop_square_crop:
+    #                 m1 = int((x2+x1)/2)
+    #                 m2 = int((y2+y1)/2)
+    #                 lx = int((x2-x1)/2)
+    #                 wy = int((y2-y1)/2)
+    #                 lorw = max(lx, wy)
+    #                 cx1 = int(m1 - lorw)
+    #                 cx2 = int(m1 + lorw)
+    #                 cy1 = int(m2 - lorw)
+    #                 cy2 = int(m2 + lorw)
+
+    #             cropped_img = img[cy1:cy2, cx1:cx2]
+
+    #             cropped_filename = os.path.basename(file_name)
+    #             cropped_filename = os.path.splitext(cropped_filename)[0]
+    #             cropped_filename = "{}_{}_{}.png".format(cropped_filename, cx1, cy1)
+    #             cv2.imwrite("{}/{}/{}".format(cfg.experiment_folder_models_target_model_ver_weights_cropped, class_name, cropped_filename), cropped_img)
+
+    cropped_objects = []
+    if cfg.operation == "instance_segmentation":
+        # Black canvas used to isolate each instance through its mask.
+        black = np.zeros((img.shape[0], img.shape[1], 3), dtype="uint8")
+
+        for p in preds:
+            cl = p.cl - 1
+            b = p.box
+            b = bops.xywh2xyxy(b)
+            m = p.mask
+            b = list(map(int, b))
+            x1, x2, y1, y2 = b
+
+            # Keep only the pixels belonging to this instance.
+            mask_array = (np.asarray(m*1)).astype(np.uint8)
+            mask_shaped = mask_array.reshape(img.shape[0], img.shape[1], 1)
+            mask_out = cv2.bitwise_and(img, img, mask=mask_shaped)
+            output_black = cv2.bitwise_or(black, mask_out)
+
+            if cfg.crop_style == "fixed":
+                # Fixed-size window centred on the predicted box, clamped to the image.
+                cs = int(cfg.crop_size/2)
+                mx = int((x2+x1)/2)
+                my = int((y2+y1)/2)
+
+                cx1 = max(0, int(mx - cs))
+                cx2 = min(img.shape[1], int(mx + cs))
+                cy1 = max(0, int(my - cs))
+                cy2 = min(img.shape[0], int(my + cs))
+
+                if cfg.crop_transparent == 0:
+                    cropped_img = img[cy1:cy2, cx1:cx2]
+                elif cfg.crop_transparent == 1:
+                    cropped_img = output_black[cy1:cy2, cx1:cx2]
+                elif cfg.crop_transparent == 2:
+                    # Masked crop with an alpha channel derived from the mask.
+                    cropped_img = output_black[cy1:cy2, cx1:cx2]
+                    tmp = cropped_img.copy()
+                    tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
+                    _, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
+                    b, g, r = cv2.split(cropped_img)
+                    rgba = [b, g, r, alpha]
+                    cropped_img = cv2.merge(rgba, 4)
+
+            elif cfg.crop_style == "predicted":
+                # Start from the predicted box, optionally expanded to a square.
+                cx1, cy1, cx2, cy2 = x1, y1, x2, y2
+
+                if cfg.crop_square_crop:
+                    m1 = int((x2+x1)/2)
+                    m2 = int((y2+y1)/2)
+                    lx = int((x2-x1)/2)
+                    wy = int((y2-y1)/2)
+                    lorw = max(lx, wy)
+                    cx1 = int(m1 - lorw)
+                    cx2 = int(m1 + lorw)
+                    cy1 = int(m2 - lorw)
+                    cy2 = int(m2 + lorw)
+
+                # Clamp the crop window itself (not the raw box) so the square
+                # expansion above is not discarded.
+                cx1 = max(0, cx1)
+                cx2 = min(img.shape[1], cx2)
+                cy1 = max(0, cy1)
+                cy2 = min(img.shape[0], cy2)
+
+                if cfg.crop_transparent == 0:
+                    cropped_img = img[cy1:cy2, cx1:cx2]
+                elif cfg.crop_transparent == 1:
+                    cropped_img = output_black[cy1:cy2, cx1:cx2]
+                elif cfg.crop_transparent == 2:
+                    try:
+                        cropped_img = output_black[cy1:cy2, cx1:cx2]
+                        tmp = cropped_img.copy()
+                        tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
+                        _, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
+                        b, g, r = cv2.split(cropped_img)
+                        rgba = [b, g, r, alpha]
+                        cropped_img = cv2.merge(rgba, 4)
+                    except cv2.error:
+                        # Degenerate (empty) crops cannot be converted; keep the plain crop.
+                        pass
+
+            obj_dict = {"img": cropped_img, "offsets": [cx1, cx2, cy1, cy2]}
+            cropped_objects.append(obj_dict)
+
+    return cropped_objects
+
+
 def crop_images(img, preds, file_name, cfg):
     for cl in cfg.class_names:
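Not part of the patch, just a sketch of how the returned list could be consumed by a caller. The `out_dir` folder and the call site are illustrative assumptions; `img`, `preds` and `cfg` are taken to be the same objects the neighbouring crop_images() already receives.

    # Hypothetical consumer of grab_objects(); saves each crop next to its offsets.
    import os

    import cv2

    out_dir = "cropped_objects"          # assumed output folder, not defined by the patch
    os.makedirs(out_dir, exist_ok=True)

    for i, obj in enumerate(grab_objects(img, preds, cfg)):
        cx1, cx2, cy1, cy2 = obj["offsets"]   # crop window in source-image pixels
        # PNG keeps the alpha channel produced when cfg.crop_transparent == 2.
        cv2.imwrite(os.path.join(out_dir, "obj_{}_{}_{}.png".format(i, cx1, cy1)), obj["img"])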