
Commit 291ed03 ("error fix")
1 parent d99fc75

11 files changed (+432, -671 lines)

generator.py (+24, -31)
@@ -15,16 +15,18 @@
 
 
 class data_augmentator():
+    # class for data augmentation
 
     def __init__(self, f_prefix, num_of_data, seq_length, val_percent):
 
         self.base_train_path = 'data/train/'
         self.base_validation_path = 'data/validation/'
 
+        # list of angles that will be used for rotation
         self.angles = list(range(0, 360, 30))
         self.num_of_data = np.clip(num_of_data, 0, len(self.angles) - 1)
-        self.num_validation_data = math.ceil(self.num_of_data * val_percent)
-        self.num_train_data = self.num_of_data - self.num_validation_data
+        self.num_validation_data = math.ceil(self.num_of_data * val_percent)  # number of validation datasets
+        self.num_train_data = self.num_of_data - self.num_validation_data  # number of training datasets
         print("For each dataset -----> Number of additional training dataset: ", self.num_train_data, " Number of validation dataset: ", self.num_validation_data)
 
         self.num_validation_data += 1
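
As a quick sanity check on the split arithmetic above (a standalone sketch, not part of the commit): with the defaults num_data=5 and validation=0.1, the clip keeps 5 (there are 12 angles, so the bound is 11), the validation share rounds up to 1, and the remaining 4 augmented copies go to training, with one more validation set reserved by the increment at the end of the hunk above.

import math
import numpy as np

angles = list(range(0, 360, 30))                   # 12 rotation angles
num_of_data = int(np.clip(5, 0, len(angles) - 1))  # 5, already within [0, 11]
num_validation = math.ceil(num_of_data * 0.1)      # ceil(0.5) = 1
num_train = num_of_data - num_validation           # 4
print(num_train, num_validation)                   # -> 4 1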
@@ -35,12 +37,13 @@ def __init__(self,f_prefix, num_of_data, seq_length, val_percent):
 
         self.dataloader = DataLoader(f_prefix, 1, seq_length, 0, forcePreProcess=True, infer=False, generate=True)
 
-
+        # noise parameter definition
         self.noise_std_min = 0.05
         self.noise_std_max = 0.15
         self.noise_std = random.uniform(self.noise_std_min, self.noise_std_max)
         self.noise_mean = 0.0
 
+        # remove existing datasets from the directories before creating new ones
         self.clear_directories(self.base_train_path)
         self.clear_directories(self.base_validation_path, True)
         self.random_dataset_creation()
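
For reference, the noise model configured here draws one standard deviation per sequence uniformly from [0.05, 0.15] and later samples zero-mean Gaussian offsets per 2-D point. A minimal standalone sketch of that sampling (illustrative, outside the class):

import random
import torch

noise_std = random.uniform(0.05, 0.15)              # per-sequence noise level
mean = torch.FloatTensor([0.0, 0.0])                # zero-mean noise
stddev = torch.FloatTensor([noise_std, noise_std])
noise = torch.normal(mean, stddev)                  # one (x, y) offset
print(noise)                                        # e.g. tensor([0.0312, -0.0784])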
@@ -67,39 +70,38 @@ def random_dataset_creation(self):
             # Get the sequence
             x_seq, d_seq, numPedsList_seq, PedsList_seq = x[0], d[0], numPedsList[0], PedsList[0]
 
+            # convert to a dense array
             x_seq, lookup_seq = self.dataloader.convert_proper_array(x_seq, numPedsList_seq, PedsList_seq)
 
             if dataset_pointer_ins != self.dataloader.dataset_pointer:
                 if self.dataloader.dataset_pointer != 0:
                     whole_dataset.append(dataset_instances)
                     dataset_instances = {}
-                random_angles = random.sample(self.angles, self.num_of_data)
-                self.noise_std = random.uniform(self.noise_std_min, self.noise_std_max)
+                random_angles = random.sample(self.angles, self.num_of_data)  # sample new angles
+                self.noise_std = random.uniform(self.noise_std_min, self.noise_std_max)  # sample a new noise level
                 print("Dataset creation for: ", file_name, " angles: ", random_angles)
 
                 dataset_pointer_ins = self.dataloader.dataset_pointer
 
-
-
-            #self.add_element_to_dict(dataset_instances, (dir_name, file_name, ''), self.submision_seq_preprocess(x_seq, self.seq_length, lookup_seq))
-
             for index, angle in enumerate(random_angles):
                 self.noise_std = random.uniform(self.noise_std_min, self.noise_std_max)
+                # modify and preprocess the dataset
                 modified_x_seq = self.submision_seq_preprocess(self.handle_seq(x_seq, lookup_seq, PedsList_seq, angle), self.seq_length, lookup_seq)
+                # store the modified data points in a dict
                 self.dataloader.add_element_to_dict(dataset_instances, (dir_name, file_name, index), modified_x_seq)
 
             end = time.time()
             print('Current file : ', file_name, ' Processed trajectory number : ', batch + 1, 'out of', self.dataloader.num_batches, 'trajectories in time', end - start)
 
-
+        # write the modified data points to txt files
         whole_dataset.append(dataset_instances)
         create_directories(os.path.join(self.f_prefix, self.base_validation_path), self.dataloader.get_all_directory_namelist())
         self.write_modified_datasets(whole_dataset)
 
 
     def handle_seq(self, x_seq, lookup_seq, PedsList_seq, angle):
+        # add noise to and rotate a trajectory
         vectorized_x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)
-        #print("x_seq: %s"%vectorized_x_seq)
         modified_x_seq = vectorized_x_seq.clone()
         mean = torch.FloatTensor([self.noise_mean, self.noise_mean])
         stddev = torch.FloatTensor([self.noise_std, self.noise_std])
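
handle_seq continues in the next hunk. Judging from how first_values_dict is used there (the rotated first position is added back at the end), vectorize_seq, which is defined elsewhere in the repo, plausibly translates each pedestrian's trajectory so it starts at the origin and remembers the first absolute position. A rough guess at that helper, for orientation only:

def vectorize_seq(x_seq, PedsList_seq, lookup_seq):
    # translate each pedestrian's trajectory so its first point is the origin
    first_values_dict = {}
    vectorized_x_seq = x_seq.clone()
    for ind, frame in enumerate(x_seq):
        for ped in PedsList_seq[ind]:
            if ped not in first_values_dict:
                first_values_dict[ped] = frame[lookup_seq[ped], :].clone()
            vectorized_x_seq[ind, lookup_seq[ped], :] = frame[lookup_seq[ped], :] - first_values_dict[ped]
    return vectorized_x_seq, first_values_dict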
@@ -108,41 +110,31 @@ def handle_seq(self, x_seq, lookup_seq, PedsList_seq, angle):
         for ind, frame in enumerate(vectorized_x_seq):
             for ped in PedsList_seq[ind]:
                 selected_point = frame[lookup_seq[ped], :]
-                #print("selected point : %s"%selected_point)
+                # rotate a frame point
                 rotated_point = rotate(origin, selected_point, math.radians(angle))
-                #print("after rotation: %s"%(rotated_point))
                 noise = torch.normal(mean, stddev).clone()
-                #print("noise %s"%noise)
+                # add random noise
                 modified_x_seq[ind, lookup_seq[ped], 0] = rotated_point[0] + noise[0]
                 modified_x_seq[ind, lookup_seq[ped], 1] = rotated_point[1] + noise[1]
-                #print("after rotation and noise: %s"%modified_x_seq[ind, lookup_seq[ped], :])
                 modified_x_seq[ind, lookup_seq[ped], :] = torch.cat(rotate(origin, first_values_dict[ped], math.radians(angle))) + modified_x_seq[ind, lookup_seq[ped], :]
         return modified_x_seq
 
     def submision_seq_preprocess(self, x_seq, seq_lenght, lookup_seq):
-
+        # recreate the original txt structure for the modified data points
         ret_x_seq_c = x_seq.data.numpy()
-        #np.array(frame_number_predicted, copy=False, subok=True, ndmin=2)
         ped_ids = self.dataloader.get_id_sequence(seq_lenght)
-        #print("lookup table: %s"%lookup_seq)
-        #print("ped ids: %s"%ped_ids)
         positions_of_peds = [lookup_seq[ped] for ped in ped_ids]
-        #print(positions_of_peds)
-        #print("input seq: %s"%ret_x_seq_c)
         ret_x_seq_c = ret_x_seq_c[:, positions_of_peds, :]
         ret_x_seq_c_selected = ret_x_seq_c[:, 0, :]
         ret_x_seq_c_selected[:, [0, 1]] = ret_x_seq_c_selected[:, [1, 0]]
         frame_numbers = self.dataloader.get_frame_sequence(seq_lenght)
         id_integrated_seq = np.append(np.array(ped_ids)[:, None], ret_x_seq_c_selected, axis=1)
         frame_integrated_seq = np.append(frame_numbers[:, None], id_integrated_seq, axis=1)
-        #print("final seq: %s"%frame_integrated_seq)
-        #print(repeated_id.shape)
-        #print(frame_integrated_prediction)
-        #print(result)
-        #print("************************")
+
         return frame_integrated_seq
 
     def write_modified_datasets(self, dataset_instances_store):
+        # write the constructed txt structure to txt files
         self.dataloader.reset_batch_pointer()
 
         for dataset_index in range(self.dataloader.numDatasets):
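
handle_seq relies on a rotate(origin, point, angle) helper that is not shown in this diff. Assuming the usual counterclockwise rotation of a 2-D point about an origin with the angle in radians (the actual helper lives elsewhere in the repo), a minimal sketch would be:

import math

def rotate(origin, point, angle):
    # rotate `point` counterclockwise around `origin` by `angle` radians
    ox, oy = origin
    px, py = point
    qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
    qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
    return qx, qy

In handle_seq the same angle is applied both to every vectorized point and to each pedestrian's stored first position, so the whole trajectory is rotated consistently before the absolute offset is restored.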
@@ -166,6 +158,7 @@ def write_dict(self, dict, base_path):
             self.dataloader.write_dataset(value, file_name, path)
 
     def clear_directories(self, base_path, delete_all=False):
+        # delete all files from a directory
         print("Clearing directories...")
         dir_names = self.dataloader.get_all_directory_namelist()
         base_path = os.path.join(self.f_prefix, base_path)
@@ -188,13 +181,13 @@ def main():
     # number of additional datasets to create from each dataset
     parser.add_argument('--num_data', type=int, default=5,
                         help='Number of additional datasets created for each one')
-
+    # length of sequence
     parser.add_argument('--seq_length', type=int, default=20,
                         help='Processing sequence length')
-
+    # allocation percentage between train and validation datasets
     parser.add_argument('--validation', type=float, default=0.1,
                         help='Percentage of data that will be allocated for validation in the additional datasets')
-
+    # use of Google Drive
     parser.add_argument('--drive', action="store_true", default=False,
                         help='Use Google Drive or not')
 

@@ -208,8 +201,8 @@ def main():
     prefix = ''
     f_prefix = '.'
     if args.drive:
-        prefix = 'drive/semester_project/new_social_LSTM_pytorch_v2/'
-        f_prefix = 'drive/semester_project/new_social_LSTM_pytorch_v2'
+        prefix = 'drive/semester_project/social_lstm_final/'
+        f_prefix = 'drive/semester_project/social_lstm_final'
 
     augmentator = data_augmentator(f_prefix, args.num_data, args.seq_length, args.validation)
 
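Based on the flags registered above, a typical invocation of the generator (an assumed example, run from the repository root without --drive) might look like:

python generator.py --num_data 5 --seq_length 20 --validation 0.1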

grid.py (+2, -18)
@@ -4,7 +4,7 @@
 from torch.autograd import Variable
 
 
-def getGridMask(frame, dimensions, num_person, neighborhood_size, grid_size, is_occupancy):
+def getGridMask(frame, dimensions, num_person, neighborhood_size, grid_size, is_occupancy=False):
     '''
     This function computes the binary mask that represents the
     occupancy of each ped in the other's grid
@@ -13,32 +13,19 @@ def getGridMask(frame, dimensions, num_person, neighborhood_size, grid_size, is_
     dimensions : This will be a list [width, height]
     neighborhood_size : Scalar value representing the size of neighborhood considered
     grid_size : Scalar value representing the size of the grid discretization
+    num_person : Number of people present in the given frame
     is_occupancy : A flag used for calculation of the occupancy map
 
     '''
-
-    # Maximum number of pedestrians
-    #print("***********************")
-    #print(frame)
-    #print(pedlist_seq)
-    #print(lookup_seq)
-    #mnp = frame.shape[0]
-    #print("*********************")
     mnp = num_person
-    #print(pedlist_seq)
-    #print(lookup_seq)
-
 
     width, height = dimensions[0], dimensions[1]
-    #print("width: ",width,"height: ",height)
     if is_occupancy:
         frame_mask = np.zeros((mnp, grid_size**2))
     else:
         frame_mask = np.zeros((mnp, mnp, grid_size**2))
     frame_np = frame.data.numpy()
 
-    #print("frame: ", frame)
-
     #width_bound, height_bound = (neighborhood_size/(width*1.0)), (neighborhood_size/(height*1.0))
     width_bound, height_bound = (neighborhood_size/(width*1.0))*2, (neighborhood_size/(height*1.0))*2
     #print("weight_bound: ", width_bound, "height_bound: ", height_bound)
@@ -61,7 +48,6 @@ def getGridMask(frame, dimensions, num_person, neighborhood_size, grid_size, is_
             # If in surrounding, calculate the grid cell
             cell_x = int(np.floor(((other_x - width_low)/width_bound) * grid_size))
             cell_y = int(np.floor(((other_y - height_low)/height_bound) * grid_size))
-            #print("cell_x: ", cell_x, "cell_y: ", cell_y)
 
             if cell_x >= grid_size or cell_x < 0 or cell_y >= grid_size or cell_y < 0:
                 continue
@@ -135,8 +121,6 @@ def getSequenceGridMask(sequence, dimensions, pedlist_seq, neighborhood_size, gr
     '''
     sl = len(sequence)
     sequence_mask = []
-    #print(pedlist_seq)
-    #print(lookup_seq)
 
     for i in range(sl):
         mask = Variable(torch.from_numpy(getGridMask(sequence[i], dimensions, len(pedlist_seq[i]), neighborhood_size, grid_size, is_occupancy)).float())
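
For shape intuition (an illustrative call with made-up numbers, not from the commit): given the arrays allocated earlier in the file, the default call returns the pairwise social mask of shape (num_person, num_person, grid_size**2), while is_occupancy=True yields an occupancy map of shape (num_person, grid_size**2).

import torch

frame = torch.rand(3, 2)  # 3 pedestrians with (x, y) positions
social_mask = getGridMask(frame, [720, 576], 3, 32, 4)          # shape (3, 3, 16)
occupancy_map = getGridMask(frame, [720, 576], 3, 32, 4, True)  # shape (3, 16)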
