load_opspam.py
import random
import re
import texar as tx
random.seed(5)
def clean(s, char=False):
    """Lowercase, drop HTML line breaks, mask digit runs as 'N', and space out punctuation."""
    ns = s.lower()
    ns = ns.replace('<br />', ' ')
    ns = re.sub(r'[0-9]+', 'N', ns)
    ns = re.sub(r'[^a-zA-Z0-9 \-.,\'\"!?()]', ' ', ns)  # Eliminate all but these chars
    ns = re.sub(r'([.,!?()\"\'])', r' \1 ', ns)  # Space out punctuation
    #if char:
    #    ns = re.sub(r'(\S)', r' \1 ', ns)  # Space out all chars
    ns = re.sub(r'\s{2,}', ' ', ns)  # Collapse runs of whitespace
    ns = ns.strip()
    return ns
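# Illustrative example (added for clarity, not in the original script): with the
# rules above, clean('Room 101 was <br /> great!') returns 'room N was great !'.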
def split_valid(textpath, labpath, tr_outtxt, tr_outlab,
                val_outtxt, val_outlab, split_count):
    """Shuffle and clean the reviews, hold out the first `split_count` examples
    as a validation split, and write the remainder as training data."""
    with open(textpath, 'r') as txtf, open(labpath, 'r') as labf:
        texts = txtf.readlines()
        labs = labf.readlines()
    shfl_idx = random.sample(range(len(texts)), len(texts))
    texts = [clean(texts[i]) for i in shfl_idx]
    labs = [labs[i] for i in shfl_idx]
    val_texts = texts[:split_count]
    val_labs = labs[:split_count]
    train_texts = texts[split_count:]
    train_labs = labs[split_count:]
    with open(tr_outtxt, 'w') as txtf, open(tr_outlab, 'w') as labf:
        for r, l in zip(train_texts, train_labs):
            txtf.write(r + '\n')
            labf.write(l)
    with open(val_outtxt, 'w') as txtf, open(val_outlab, 'w') as labf:
        for r, l in zip(val_texts, val_labs):
            txtf.write(r + '\n')
            labf.write(l)
if __name__ == '__main__':
    # First pass: hold out 320 reviews from the full corpus; the rest become
    # the training split.
    split_valid('./opspam_reviews.txt', './opspam_labels.txt',
                './opspam_train_reviews.txt', './opspam_train_labels.txt',
                './opspam_val_reviews.txt', './opspam_val_labels.txt',
                320)
    # Second pass: split those 320 held-out reviews into 160 validation and
    # 160 test examples (the validation files are overwritten in place).
    split_valid('./opspam_val_reviews.txt', './opspam_val_labels.txt',
                './opspam_val_reviews.txt', './opspam_val_labels.txt',
                './opspam_test_reviews.txt', './opspam_test_labels.txt',
                160)
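    # Note (added for context): assuming ./opspam_reviews.txt holds the standard
    # 1,600-review OpSpam corpus, this yields 1,280 train / 160 val / 160 test.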
    # Build a shared vocabulary (top 10,000 tokens) over all three splits.
    vocab_words = tx.data.make_vocab(['./opspam_train_reviews.txt',
                                      './opspam_val_reviews.txt',
                                      './opspam_test_reviews.txt'],
                                     max_vocab_size=10000)
    with open('opspam_vocab.txt', 'w') as vf:
        for v in vocab_words:
            vf.write(v + '\n')
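    # Optional sanity check (illustrative addition, not part of the original
    # script): uncomment to print how many reviews ended up in each split.
    # for split in ('train', 'val', 'test'):
    #     with open('./opspam_%s_reviews.txt' % split) as f:
    #         print(split, len(f.readlines()))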