Commit a3eb9db1 authored by Chauhan, Aneesh's avatar Chauhan, Aneesh
Browse files

Merge branch 'first_derv_csv_json_combined' into 'master'

Using the first derivative as features, in order to capture the rate of change, instead of...

See merge request !1
parents 14b88867 eaf900fe
%% Cell type:code id:bb258807 tags:
``` python
## NOTE: Location classes are currently coarse, hard-coded values (left/right/front).
# TODO: in a future version, map to the actual positions the object moved to, not just high-level movements.
```
%% Cell type:code id:dc4557a6 tags:
``` python
#Import the Classifier, feature extractor
from common_utils.classifier import Classifier
from common_utils.feature_extraction import ExtractFeatures
from common_utils.top_view_pos_extraction import ExtractPositions
import numpy as np
import os
import glob
import json
```
%% Cell type:code id:5946b736 tags:
``` python
# Directory where the scenario models are saved.
# Each model directory must contain the 3 pkl files which together constitute a single model:
#   label_encoder.pkl  model.pkl  X_scaler_model.pkl
model_dir = "/home/ur5/sandbox/har_scenario_modeling/models/scenario2"
# Instantiate the classifier from the saved model files
my_model = Classifier(model_dir)
```
%% Cell type:code id:7c0728ea tags:
``` python
# Load data from the csv files and extract the features.
# Side view is the source for classifier features; top view is the source for hand positions.
side_view_data_path = "/home/ur5/remote_dir/KB Autonomous Robotics/2021/LabGradingSortingDataset/top_and_side_view-all-videos/Side-view-KB00-05-03-2021/scenario-1/mandarins"
top_view_data_path = "/home/ur5/remote_dir/KB Autonomous Robotics/2021/LabGradingSortingDataset/uldi-processed/Top-view-KB00-05-03-2021/scenario-1/mandarins"
# The data path is the folder which is recursively parsed to search for the csv files (assumes that the format is correct).
fe = ExtractFeatures(side_view_data_path)
# X: feature matrix, one row per recording (shape printed below); Labels: the matching class labels
X, Labels = fe.generate_features()
print(X.shape)
```
%% Output
(24, 170)
%% Cell type:code id:f9c7b395 tags:
``` python
# Extract object/hand positions from the top-view recordings; exposes `hand_xy` (used below).
top_view_pos = ExtractPositions(top_view_data_path)
# print(top_view_pos)
# print(top_view_pos.hand_xy)
# X, Labels = fe.generate_features()
```
%% Cell type:code id:3fba11cf tags:
``` python
# Sanity check: feature matrix shape is unchanged (rows = recordings, cols = features)
print(X.shape)
```
%% Output
(24, 170)
%% Cell type:markdown id:2c8f3d3e tags:
# Example prediction usage
%% Cell type:code id:a5322149 tags:
``` python
# Examples of predictions
# x = X[0,:].reshape(1,-1) #Just one recording
# prediction = my_model.predict(x)
# print(prediction, Labels[0])
# #Some four recordings
# Predict on a slice of eight recordings (indices 10..17).
x = X[10:18]
# Top-view hand (x, y) positions for the same slice
# NOTE(review): assumes hand_xy rows are ordered identically to X's rows — confirm in the extractors.
hand_pos = top_view_pos.hand_xy[10:18]
# x = X[:,:]
prediction = my_model.predict(x)
print(hand_pos)
print(prediction, '\n' , Labels[10:18])
```
%% Output
[[375 260]
[491 460]
[431 293]
[790 484]
[977 276]
[756 277]
[898 504]
[968 308]]
['large_left' 'large_left' 'large_left' 'large_left' 'small_right'
'small_right' 'small_right' 'small_right']
[['large']
['large']
['large']
['large']
['small']
['small']
['small']
['small']]
%% Cell type:code id:2c18c877 tags:
``` python
# For every predicted class, compute the median top-view hand position (x, y)
# across all recordings that were assigned to that class.
classes = np.unique(prediction)
class_placement_pos = {
    c: np.median(hand_pos[prediction == c], axis=0)
    for c in classes
}
print(class_placement_pos)
```
%% Output
{'large_left': array([461. , 376.5]), 'small_right': array([933. , 292.5])}
%% Cell type:markdown id:8a1ead12 tags:
# Example usage per video directory
### where
### the data organization is assumed to be such that, for the data folder corresponding to a scenario,
### there is a CSV corresponding to the side view and a JSON corresponding to the top view
%% Cell type:code id:541adcab tags:
``` python
# Load data from the csv and json files and extract the features.
# Example scenario directory layout: one csv and one json saved per video (vid_18 is missing).
# data_dir = "/home/ur5/sandbox/har_scenario_modeling/data/mandarins"
data_dir = '/home/ur5/sandbox/har_scenario_modeling/data-5-08-2021/bananas-tv-mp4'
results = {}
for dname in os.listdir(data_dir):
    vid_dir = os.path.join(data_dir, dname)
    if not os.path.isdir(vid_dir):
        continue  # skip stray files at the top level
    # os.path.join instead of string concatenation for the glob patterns
    sv_csv_f = glob.glob(os.path.join(vid_dir, "*.csv"))
    tv_json_f = glob.glob(os.path.join(vid_dir, "*.json"))
    if sv_csv_f and tv_json_f:  # both files must exist for this video
        fe = ExtractFeatures(vid_dir)
        vid_feats, vid_labels = fe.generate_features_single_video(sv_csv_f[0])  # , draw_plots=True
        # NOTE(review): ExtractPositions receives data_dir while ExtractFeatures
        # receives vid_dir — looks inconsistent; confirm this is intended.
        tv_pos = ExtractPositions(data_dir)
        hand_pos = tv_pos.load_data_single_video(tv_json_f[0])
        quality = my_model.predict(vid_feats)
        # results maps video dir -> [predicted quality label(s), hand (x, y) position]
        results[vid_dir] = [quality, hand_pos]
```
%% Output
[1084 387]
[455 314]
%% Cell type:code id:43ab969e tags:
``` python
# Inspect the per-video predictions and hand positions collected above
print(results)
```
%% Output
{'/home/ur5/sandbox/har_scenario_modeling/data-5-08-2021/bananas-tv-mp4/vid_1': [array(['ripe_right'], dtype=object), array([1084, 387])], '/home/ur5/sandbox/har_scenario_modeling/data-5-08-2021/bananas-tv-mp4/vid_3': [array(['unripe_left'], dtype=object), array([455, 314])]}
%% Cell type:code id:c5687c21 tags:
``` python
# For the given results, group the hand positions by predicted quality class.
hand_poses = {}
for val in results.values():
    q = val[0].item()  # unwrap the single-element prediction array to a plain string
    pos = val[1]
    # Explicit membership test instead of a bare try/except (which would also
    # have swallowed unrelated errors from np.vstack).
    if q in hand_poses:
        hand_poses[q] = np.vstack((hand_poses[q], pos))
    else:
        hand_poses[q] = pos
# Reduce each class to one representative [x, y] placement position.
scenarios_dict = {}
for q in hand_poses.keys():
    poses = np.asarray(hand_poses[q])
    # BUGFIX: the old `len(...) == 2` test for "only one position" also matched a
    # class with exactly TWO stacked positions (shape (2, 2) has len 2), emitting a
    # nested list instead of the median. Checking ndim distinguishes the cases.
    if poses.ndim == 1:  # only one position was recorded for this class
        scenarios_dict[q] = poses.astype(int).tolist()
    else:
        scenarios_dict[q] = np.median(poses, axis=0).astype(int).tolist()
print(scenarios_dict)
# Save the per-class placement positions for the application to consume.
scenario_f = '/home/ur5/sandbox/har_scenario_modeling/application_scenario_jsons/mandarins/scenario_hand_pos.json'
with open(scenario_f, 'w') as fp:
    json.dump(scenarios_dict, fp, indent=4)
```
%% Output
{'ripe_right': [1084, 387], 'unripe_left': [455, 314]}
%% Cell type:markdown id:04b96fac tags:
# Load json file
%% Cell type:code id:d58341ab tags:
``` python
# Read the saved scenario file back to verify its contents.
with open(scenario_f) as fp:
    scenario_json = json.load(fp)
print(scenario_json)
```
%% Output
{'small_right': [979, 319], 'large_left': [420, 214]}
%% Cell type:code id:db33c4ce tags:
``` python
```