-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdata_helper.py
83 lines (77 loc) · 2.69 KB
/
data_helper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
"""
data helper to preprocess csv format text dataset
"""
import csv
import numpy as np
import random
class data_helper():
    """Preprocess CSV-format text datasets into character-level index vectors.

    Each character of a fixed 67-symbol alphabet maps to an integer in
    [1, 67]; any character outside the alphabet maps to 68, and unused
    (padding) positions are left as 0.
    """

    def __init__(self, sequence_max_length=1024):
        """
        Args:
            sequence_max_length: fixed length of every encoded vector;
                longer texts are truncated, shorter ones are zero-padded.
        """
        self.alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:’"/|_#$%ˆ&*˜‘+=<>()[]{} '
        self.char_dict = {}
        self.sequence_max_length = sequence_max_length
        # 1-based indices so that 0 stays reserved for padding.
        for i, c in enumerate(self.alphabet):
            self.char_dict[c] = i + 1

    def char2vec(self, text):
        """Encode `text` as a numpy vector of length `sequence_max_length`.

        Returns:
            np.ndarray of shape (sequence_max_length,) holding character
            indices (1-67), 68 for unknown characters, 0 for padding.
        """
        data = np.zeros(self.sequence_max_length)
        # BUG FIX: the original guard was `i > sequence_max_length`, which
        # let `i == sequence_max_length` through and raised IndexError on
        # any text longer than the limit. Bounding the range fixes it.
        for i in range(min(len(text), self.sequence_max_length)):
            # Unknown characters map to 68 (one past the alphabet).
            data[i] = self.char_dict.get(text[i], 68)
        return data

    def load_csv_file(self, filename, num_classes):
        """
        Load CSV file, generate one-hot labels and process text data as Paper did.

        The first CSV column is read as the integer class label; the last of
        the remaining fields is lower-cased and character-encoded.

        Args:
            filename: path to the CSV file.
            num_classes: kept for interface compatibility; labels are stored
                as plain integers, not one-hot vectors, in this version.

        Returns:
            (data, labels) as numpy arrays.
        """
        all_data = []
        labels = []
        # utf-8-sig transparently strips a leading BOM if present.
        with open(filename, encoding='utf-8-sig') as f:
            reader = csv.DictReader(f, fieldnames=['class'], restkey='fields')
            for row in reader:
                labels.append(int(row['class']))
                # Encode the last field as the sample text.
                text = row['fields'][-1].lower()
                all_data.append(self.char2vec(text))
        # NOTE: the `with` block closes the file; no explicit close needed.
        return np.array(all_data), np.array(labels)

    def load_dataset(self, dataset_path):
        """Load class names and the train/test splits from `dataset_path`.

        Expects `classes.txt`, `training.txt` and `testing.txt` inside the
        directory (snopes dataset layout).
        """
        # Read class names, one per line (read for parity with the original;
        # the CSV loaders below use a fixed num_classes of 1).
        with open(dataset_path + "classes.txt") as f:
            classes = [line.strip() for line in f]
        train_data, train_label = self.load_csv_file(dataset_path + 'training.txt', 1)
        test_data, test_label = self.load_csv_file(dataset_path + 'testing.txt', 1)
        return train_data, train_label, test_data, test_label

    def batch_iter(self, data, batch_size, num_epochs, shuffle=True):
        """
        Generates a batch iterator for a dataset.

        Args:
            data: indexable dataset; converted to a numpy array internally.
            batch_size: samples per batch (the last batch may be smaller).
            num_epochs: number of full passes over the data.
            shuffle: reshuffle the data at the start of every epoch.

        Yields:
            numpy array slices of up to `batch_size` samples.
        """
        data = np.array(data)
        data_size = len(data)
        num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
        for _ in range(num_epochs):
            # Shuffle the data at each epoch.
            if shuffle:
                shuffled_data = data[np.random.permutation(np.arange(data_size))]
            else:
                shuffled_data = data
            for batch_num in range(num_batches_per_epoch):
                start_index = batch_num * batch_size
                end_index = min(start_index + batch_size, data_size)
                yield shuffled_data[start_index:end_index]