-
Notifications
You must be signed in to change notification settings - Fork 0
/
nlp_helper_funcs.py
92 lines (84 loc) · 3.38 KB
/
nlp_helper_funcs.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
from sklearn.feature_extraction.text import CountVectorizer
import pickle
class nlp_preprocessor:
    """Pipeline helper for NLP preprocessing.

    Bundles a cleaning function, a tokenizer, an optional stemmer, and a
    vectorizer, and manages cleaning, fitting, and transforming text data
    through them. State can be persisted with save_pipe/load_pipe.
    """

    def __init__(self, vectorizer=None, tokenizer=None, cleaning_function=None,
                 stemmer=None, model=None):
        """
        A class for pipelining our data in NLP problems. The user provides a series of
        tools, and this class manages all of the training, transforming, and modification
        of the text data.
        ---
        Inputs:
        vectorizer: the model to use for vectorization of text data;
            if None, a fresh CountVectorizer is created per instance
        tokenizer: The tokenizer to use, if none defaults to split on spaces
        cleaning_function: how to clean the data, if None, defaults to the in built class
        stemmer: optional object with a .stem(word) method, applied per token
        model: optional downstream model; stored but not used by this class
        """
        # Create the default vectorizer lazily so each instance gets its OWN
        # CountVectorizer. The original `vectorizer=CountVectorizer()` default
        # was evaluated once at class-definition time and shared by every
        # instance built with the default (classic mutable-default bug).
        if vectorizer is None:
            vectorizer = CountVectorizer()
        if not tokenizer:
            tokenizer = self.splitter
        if not cleaning_function:
            cleaning_function = self.clean_text
        self.stemmer = stemmer
        self.tokenizer = tokenizer
        self.model = model
        self.cleaning_function = cleaning_function
        self.vectorizer = vectorizer
        self._is_fit = False  # flips to True after fit(); guards transform()

    def splitter(self, text):
        """
        Default tokenizer that splits on spaces naively.
        This is the default behavior if no other splitting function is specified.
        """
        return text.split(' ')

    def clean_text(self, text, tokenizer, stemmer):
        """
        Default cleaner: lowercases every token (stemming it when a stemmer
        is provided) and rejoins each document with single spaces.
        This is the default behavior if no other cleaning function is specified.
        """
        cleaned_text = []
        for post in text:
            cleaned_words = []
            for word in tokenizer(post):
                low_word = word.lower()
                if stemmer:
                    low_word = stemmer.stem(low_word)
                cleaned_words.append(low_word)
            cleaned_text.append(' '.join(cleaned_words))
        return cleaned_text

    def fit(self, text):
        """
        Cleans the data and then fits the vectorizer with
        the user provided text.
        """
        clean_text = self.cleaning_function(text, self.tokenizer, self.stemmer)
        self.vectorizer.fit(clean_text)
        self._is_fit = True

    def transform(self, text):
        """
        Cleans any provided data and then transforms the data into
        a vectorized format based on the fit function. Returns the
        vectorized form of the data.

        Raises ValueError if called before fit().
        """
        if not self._is_fit:
            raise ValueError("Must fit the models before transforming!")
        clean_text = self.cleaning_function(text, self.tokenizer, self.stemmer)
        return self.vectorizer.transform(clean_text)

    def save_pipe(self, filename):
        """
        Writes the attributes of the pipeline to a file
        allowing a pipeline to be loaded later with the
        pre-trained pieces in place.

        Raises TypeError if filename is not a string.
        """
        if not isinstance(filename, str):
            raise TypeError("filename must be a string")
        # Only append the extension when missing, so save_pipe("x.mdl")
        # writes "x.mdl" — the file load_pipe("x.mdl") will open. The
        # original unconditionally appended, producing "x.mdl.mdl".
        if not filename.endswith('.mdl'):
            filename += '.mdl'
        # `with` guarantees the file handle is closed even if pickling fails.
        with open(filename, 'wb') as fh:
            pickle.dump(self.__dict__, fh)

    def load_pipe(self, filename):
        """
        Reads a previously saved pipeline from a file, restoring
        the pre-trained pieces (vectorizer, tokenizer, etc.) in place.

        Raises TypeError if filename is not a string.
        SECURITY NOTE: pickle.load executes arbitrary code from the file —
        only load files you trust.
        """
        if not isinstance(filename, str):
            raise TypeError("filename must be a string")
        if not filename.endswith('.mdl'):
            filename += '.mdl'
        with open(filename, 'rb') as fh:
            self.__dict__ = pickle.load(fh)