saracandu committed
Commit 6b13629 · verified · 1 Parent(s): a9d64ee

Delete trajectories.py

Files changed (1)
  1. trajectories.py +0 -126
trajectories.py DELETED
@@ -1,126 +0,0 @@
- import os
- import csv
- import torch
- from torch.utils.data import Dataset
- import math
- import numpy as np
- import copy
-
- from utils import from_string_to_formula
-
-
- def get_dataset(dataname, datafolder='data', indexes=None):
-     # TODO: add times if available
-     # load dataset
-     with open(datafolder + os.path.sep + dataname + os.path.sep + 'labels.csv', 'r') as f:
-         label_reader = csv.reader(f)
-         labels = next(label_reader)
-         labels = [int(i) for i in labels]
-
-     data = []
-     with open(datafolder + os.path.sep + dataname + os.path.sep + 'data.csv', 'r') as f:
-         data_reader = csv.reader(f)
-         header = next(data_reader)
-         n = len(header)
-
-         for _, row in enumerate(data_reader):
-             sublists = [[] for _ in range(n)]
-             for i, item in enumerate(row):
-                 sublists[i % n].append(float(item))
-             data.append(sublists)
-     if indexes is not None:
-         return torch.tensor(data)[:, indexes, :], torch.tensor(labels)
-     return torch.tensor(data), torch.tensor(labels)
-
-
- class TrajectoryDataset(Dataset):
-     def __init__(self, device, data_fn=None, dataname=None, indexes=None, x=None, y=None):
-         if (x is None) or (y is None):
-             x, y = data_fn(dataname, indexes=indexes)
-         self.trajectories = x.to(device)
-         self.labels = y.to(device)
-         self.nvars = x.shape[1]
-         self.npoints = x.shape[-1]
-         self.mean = torch.zeros(self.nvars).to(device)
-         self.std = torch.zeros(self.nvars).to(device)
-         self.normalized = False
-
-     def reshape_mean_std(self):
-         rep_mean = torch.cat([self.mean[i].repeat(
-             self.trajectories.shape[0], self.trajectories.shape[-1]).unsqueeze(1) for i in range(self.nvars)], dim=1)
-         rep_std = torch.cat([self.std[i].repeat(
-             self.trajectories.shape[0], self.trajectories.shape[-1]).unsqueeze(1) for i in range(self.nvars)], dim=1)
-         return rep_mean.to(self.trajectories.device), rep_std.to(self.trajectories.device)
-
-     def normalize(self):
-         self.mean = torch.tensor([self.trajectories[:, i, :].mean() for i in range(self.nvars)])
-         self.std = torch.tensor([self.trajectories[:, i, :].std() for i in range(self.nvars)])
-         rep_mean, rep_std = self.reshape_mean_std()
-         self.trajectories = (self.trajectories - rep_mean) / rep_std
-         self.normalized = True
-
-     def inverse_normalize(self):
-         rep_mean, rep_std = self.reshape_mean_std()
-         self.trajectories = (self.trajectories * rep_std) + rep_mean
-         self.normalized = False
-
-     def time_scaling(self, phi, phi_timespan=100):
-         # npoints is the number of points in the original trajectory (hence the original formulae)
-         current_one_percent = self.npoints/phi_timespan  # in npoints
-         phi_str = str(phi)
-         temporal_start_idx = [i for i in range(len(phi_str)) if phi_str.startswith('[', i)]
-         temporal_middle_idx = [i for i in range(len(phi_str)) if phi_str.startswith(',', i)]
-         temporal_end_idx = [i for i in range(len(phi_str)) if phi_str.startswith(']', i)]
-         start_idx = temporal_start_idx[0] if len(temporal_start_idx) > 0 else None
-         str_list = [phi_str[:start_idx]]
-         new_intervals_list = []
-         for i, s, m, e in zip(range(len(temporal_start_idx)), temporal_start_idx, temporal_middle_idx,
-                               temporal_end_idx):
-             right_unbound = True if phi_str[e-1] == 'f' else False
-             right_bound = -1. if right_unbound else float(phi_str[m+1:e])
-             current_time_interval = [float(phi_str[s+1:m]), right_bound]  # this is the original interval
-             # these are the changes I was making (so this is the main part that should be changed)
-             current_percentage = 0 if right_unbound else current_time_interval[1] - current_time_interval[0]
-             new_left = math.floor(current_time_interval[0]*current_one_percent)
-             new_time_interval = [new_left, min([new_left + math.ceil(current_percentage*current_one_percent),
-                                                 self.npoints])]
-             new_right_str = 'inf' if right_unbound else str(new_time_interval[1])
-             # from now on it is changing the formula parameters
-             new_intervals_list += ['[' + str(new_time_interval[0]) + ',' + new_right_str + ']']
-             idx = temporal_start_idx[i+1] if i < len(temporal_start_idx) - 1 else None
-             str_list.append(phi_str[e+1:idx])
-         new_phi_str = ''
-         for i in range(len(new_intervals_list)):
-             new_phi_str += str_list[i]
-             new_phi_str += new_intervals_list[i]
-         new_phi_str += str_list[-1]
-         return from_string_to_formula(new_phi_str)
-
-     def __len__(self):
-         return self.trajectories.shape[0]
-
-     def __getitem__(self, idx):
-         return self.trajectories[idx], self.labels[idx]
-
-
- # dataset = TrajectoryDataset(data_fn=get_dataset, dataname='robot4', indexes=None, device='cpu')
- # print(dataset.trajectories.shape, dataset.labels.shape)
- # train_size = int(0.8 * len(dataset))
- # test_size = int(0.5 * (len(dataset) - int(0.5 * 0.8 * len(dataset))))
- # val_size = len(dataset) - train_size - test_size
- # train_subset, test_subset, val_subset = torch.utils.data.random_split(dataset, [train_size, test_size, val_size])
- # train_dataset = TrajectoryDataset(x=dataset.trajectories[train_subset.indices],
- #                                   y=dataset.labels[train_subset.indices])
- # train_dataset.normalize()
- # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True)
- # test_dataset = TrajectoryDataset(x=dataset.trajectories[test_subset.indices], y=dataset.labels[test_subset.indices])
- # test_dataset.normalize()
- # test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=16, shuffle=False)
- # validation_dataset = TrajectoryDataset(x=dataset.trajectories[val_subset.indices],
- #                                        y=dataset.labels[val_subset.indices])
- # validation_dataset.normalize()
- # validation_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=16, shuffle=False)
-
- # train_dataset.inverse_normalize()
- # test_dataset.inverse_normalize()
- # validation_dataset.inverse_normalize()