Commit cb7cd2fa authored by Chauhan, Aneesh's avatar Chauhan, Aneesh
Browse files

New models and inference code ready, and the supporting libraries as well...

New models and inference code ready, and the supporting libraries as well (also part of the previous commit).
parent 5299ee30
import numpy as np
from sklearn import preprocessing
import pickle
import os
import json
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
class Classifier:
    """Inference wrapper around a pickled feature scaler, classifier and label encoder.

    Expects ``model_dir`` to contain three pickles written by the training
    pipeline: ``label_encoder.pkl``, ``X_scaler_model.pkl`` and ``model.pkl``.
    """

    def __init__(self, model_dir):
        self.load(model_dir)

    def load(self, model_dir):
        """Load the label encoder, feature scaler and model from ``model_dir``.

        Fix: the original used ``pickle.load(open(...))``, which leaks the
        file handles; ``with`` guarantees each file is closed.
        """
        with open(os.path.join(model_dir, "label_encoder.pkl"), 'rb') as f:
            self.le = pickle.load(f)
        with open(os.path.join(model_dir, "X_scaler_model.pkl"), 'rb') as f:
            self.X_scaler_model = pickle.load(f)
        with open(os.path.join(model_dir, "model.pkl"), 'rb') as f:
            self.model = pickle.load(f)

    def predict(self, x):
        """Scale ``x``, run the model and return decoded (textual) labels."""
        x_scaled = self.X_scaler_model.transform(x)
        prediction = self.model.predict(x_scaled)
        return self.decode_labels(prediction)

    def predict_and_save(self, x, prediction_file):
        """Predict labels for ``x`` and dump a {quality: location} dict as JSON.

        Labels are assumed to have the form ``quality_location``.
        NOTE(review): duplicate qualities overwrite earlier entries in the
        dict; presumably at most one prediction per quality is expected —
        confirm with the caller.
        """
        textual_prediction = self.predict(x)
        prediction_dict = {}
        for p in textual_prediction:
            parts = p.split('_')
            # parts beyond [1] (if any) are ignored, matching prior behavior
            prediction_dict[parts[0]] = parts[1]
        print(prediction_dict)
        with open(prediction_file, 'w') as fp:
            json.dump(prediction_dict, fp, indent=4)

    def get_class_names(self):
        """Return the encoder's textual class names."""
        return self.le.classes_

    def get_num_classes(self):
        """Return the number of distinct classes."""
        return len(self.le.classes_)

    def decode_labels(self, numerical_labels):
        """Convert numerical labels back to textual labels."""
        return self.le.inverse_transform(numerical_labels)
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset
class Dataset(torch.utils.data.Dataset):
    """Map-style PyTorch dataset pairing feature rows with labels.

    ``data`` is indexed as a 2-D array (``data[idx, :]``); both the sample
    and its label are returned as float64.
    """

    def __init__(self, data, labels):
        self.labels = labels
        self.data = data

    def __len__(self):
        """Total number of samples."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return the ``(sample, label)`` pair at ``idx``."""
        sample = self.data[idx, :].astype(np.float64)
        target = self.labels[idx].astype(np.float64)
        return sample, target
# Label-free variant of `Dataset`, for inference where no ground truth exists.
class PredictionDataset(torch.utils.data.Dataset):
    """Map-style PyTorch dataset over unlabeled feature rows."""

    def __init__(self, data):
        self.data = data

    def __len__(self):
        """Total number of samples."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return the float64 feature row at ``idx``."""
        return self.data[idx, :].astype(np.float64)
\ No newline at end of file
{
"joints": [
"left_shoulder_y","left_shoulder_x",
"right_shoulder_y","right_shoulder_x",
"left_elbow_y","left_elbow_x",
"right_elbow_y","right_elbow_x",
"left_wrist_y","left_wrist_x",
"right_wrist_y","right_wrist_x"
],
"labels": [
"quality"
]
}
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
from torch.nn.functional import gelu
class Chomp1d(nn.Module):
    """Trim the trailing ``chomp_size`` time steps from an (N, C, L) tensor.

    Used after a padded causal Conv1d so the output length matches the
    input length.
    """

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # Fix: with chomp_size == 0 the original slice x[:, :, :-0]
        # returned an EMPTY tensor (e.g. when kernel_size == 1 makes the
        # padding zero); a zero chomp must be the identity.
        if self.chomp_size == 0:
            return x
        return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
    """One residual block of a TCN.

    Two dilated causal convolutions (each followed by chomp, ReLU and
    dropout) plus a residual connection; a 1x1 conv aligns channel counts
    on the skip path when they differ.
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        # First causal conv: Chomp1d drops the trailing `padding` steps so
        # the output length matches the input.
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        # Second causal conv with the same shape contract.
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                                 self.conv2, self.chomp2, self.relu2, self.dropout2)
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """Re-initialise every conv weight from N(0, 0.01)."""
        for conv in (self.conv1, self.conv2):
            conv.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)

    def forward(self, x):
        """Map (N, C_in, L) -> (N, C_out, L) with a ReLU'd residual sum."""
        transformed = self.net(x)
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(transformed + shortcut)
class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks with exponentially growing dilation (2**i).

    ``num_channels[i]`` gives the output width of the i-th block; the
    padding keeps every block length-preserving and causal.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.1):
        super(TemporalConvNet, self).__init__()
        blocks = []
        for level, width in enumerate(num_channels):
            dilation = 2 ** level
            previous = num_inputs if level == 0 else num_channels[level - 1]
            blocks.append(TemporalBlock(previous, width, kernel_size, stride=1,
                                        dilation=dilation,
                                        padding=(kernel_size - 1) * dilation,
                                        dropout=dropout))
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        """Map (N, num_inputs, L) -> (N, num_channels[-1], L)."""
        return self.network(x)
import torch.nn.functional as F
from torch import nn
from common_utils.tcn import TemporalConvNet
class TCN(nn.Module):
    """Temporal convolutional network with a linear read-out.

    Runs the input through a TemporalConvNet and applies a Linear layer to
    the features of the final time step only.
    """

    def __init__(self, input_size, output_size, num_channels, kernel_size, dropout=0.):
        super(TCN, self).__init__()
        self.tcn = TemporalConvNet(input_size, num_channels,
                                   kernel_size=kernel_size, dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)
        # init_weights() deliberately NOT called (ANEESH: scaled output wanted).

    def init_weights(self):
        """Small-variance init for the read-out layer (unused by default)."""
        self.linear.weight.data.normal_(0, 0.001)

    def forward(self, inputs):
        """Map (N, C_in, L_in) -> (N, output_size)."""
        features = self.tcn(inputs)      # (N, num_channels[-1], L_in)
        last_step = features[:, :, -1]   # keep only the final time step
        return self.linear(last_step)
This diff is collapsed.
{
"name": "/home/ur5/remote_dir/KB Autonomous Robotics/2021/LfDExperiment_20210805/scenarios/bananas/bananas-tv-mp4/vid1/tv_vid_1.mp4",
"hand_xyc": [
1105,
387
],
"category": "ripe"
}
\ No newline at end of file
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment