
Commit dc7ccd4

Adding files
0 parents  commit dc7ccd4

File tree

6 files changed: +484 -0 lines changed

chatbot_model.h5

182 KB
Binary file not shown.
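The diff viewer cannot render the binary, but this is the Keras model that train_chatbot.py (below) saves. A minimal sketch for inspecting it, assuming the file sits in the working directory:

from keras.models import load_model

model = load_model('chatbot_model.h5')
# Should list the stack built in train_chatbot.py:
# Dense(128, relu) -> Dropout(0.5) -> Dense(64, relu) -> Dropout(0.5) -> Dense(softmax)
model.summary()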

chatgui.py

Lines changed: 117 additions & 0 deletions
@@ -0,0 +1,117 @@
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np

from keras.models import load_model
model = load_model('chatbot_model.h5')
import json
import random
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl','rb'))
classes = pickle.load(open('classes.pkl','rb'))


def clean_up_sentence(sentence):
    # tokenize the sentence and reduce each word to its lowercase lemma
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
    return sentence_words

# return bag-of-words array: 0 or 1 for each vocabulary word that exists in the sentence

def bow(sentence, words, show_details=True):
    # tokenize the pattern
    sentence_words = clean_up_sentence(sentence)
    # bag of words - vector over the N-word vocabulary
    bag = [0]*len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                # assign 1 if the current word is at this vocabulary position
                bag[i] = 1
                if show_details:
                    print("found in bag: %s" % w)
    return np.array(bag)

def predict_class(sentence, model):
    # filter out predictions below a threshold
    p = bow(sentence, words, show_details=False)
    res = model.predict(np.array([p]))[0]
    ERROR_THRESHOLD = 0.25
    results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
    # sort by strength of probability
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
    return return_list

def getResponse(ints, intents_json):
    tag = ints[0]['intent']
    list_of_intents = intents_json['intents']
    for i in list_of_intents:
        if i['tag'] == tag:
            result = random.choice(i['responses'])
            break
    return result

def chatbot_response(msg):
    ints = predict_class(msg, model)
    res = getResponse(ints, intents)
    return res


# Creating GUI with tkinter
from tkinter import *


def send():
    msg = EntryBox.get("1.0", 'end-1c').strip()
    EntryBox.delete("1.0", END)  # Text indices start at "1.0"; "0.0" only works because Tk clamps it

    if msg != '':
        ChatLog.config(state=NORMAL)
        ChatLog.insert(END, "You: " + msg + '\n\n')
        ChatLog.config(foreground="#442265", font=("Verdana", 12))

        res = chatbot_response(msg)
        ChatLog.insert(END, "Bot: " + res + '\n\n')

        ChatLog.config(state=DISABLED)
        ChatLog.yview(END)


base = Tk()
base.title("Hello")
base.geometry("400x500")
base.resizable(width=FALSE, height=FALSE)

# Create Chat window
ChatLog = Text(base, bd=0, bg="white", height="8", width="50", font="Arial")

ChatLog.config(state=DISABLED)

# Bind scrollbar to Chat window
scrollbar = Scrollbar(base, command=ChatLog.yview, cursor="heart")
ChatLog['yscrollcommand'] = scrollbar.set

# Create Button to send message
SendButton = Button(base, font=("Verdana", 12, 'bold'), text="Send", width="12", height=5,
                    bd=0, bg="#32de97", activebackground="#3c9d9b", fg='#ffffff',
                    command=send)

# Create the box to enter message
EntryBox = Text(base, bd=0, bg="white", width="29", height="5", font="Arial")
#EntryBox.bind("<Return>", send)


# Place all components on the screen
scrollbar.place(x=376, y=6, height=386)
ChatLog.place(x=6, y=6, height=386, width=370)
EntryBox.place(x=128, y=401, height=90, width=265)
SendButton.place(x=6, y=401, height=90)

base.mainloop()
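One rough edge worth noting: if every class scores below ERROR_THRESHOLD, predict_class returns an empty list and getResponse raises IndexError on ints[0]. A minimal guarded wrapper, a sketch that reuses the noanswer intent already defined in intents.json (safe_response is a hypothetical helper, not part of the commit):

import random

def safe_response(ints, intents_json):
    # Fall back to the "noanswer" intent when nothing cleared ERROR_THRESHOLD
    if not ints:
        fallback = next(i for i in intents_json['intents'] if i['tag'] == 'noanswer')
        return random.choice(fallback['responses'])
    return getResponse(ints, intents_json)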

classes.pkl

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
(lp0
Vadverse_drug
p1
aVblood_pressure
p2
aVblood_pressure_search
p3
aVgoodbye
p4
aVgreeting
p5
aVhospital_search
p6
aVoptions
p7
aVpharmacy_search
p8
aVthanks
p9
a.
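This is a protocol-0 (ASCII) pickle, which is why it is legible in the diff: (lp0 opens a list, each V… line is a unicode string, a appends it, and . ends the stream. Reading it back, a minimal sketch:

import pickle

with open('classes.pkl', 'rb') as f:
    classes = pickle.load(f)
print(classes)
# ['adverse_drug', 'blood_pressure', 'blood_pressure_search', 'goodbye',
#  'greeting', 'hospital_search', 'options', 'pharmacy_search', 'thanks']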

intents.json

Lines changed: 73 additions & 0 deletions
@@ -0,0 +1,73 @@
{"intents": [
    {"tag": "greeting",
     "patterns": ["Hi there", "How are you", "Is anyone there?", "Hey", "Hola", "Hello", "Good day"],
     "responses": ["Hello, thanks for asking", "Good to see you again", "Hi there, how can I help?"],
     "context": [""]
    },
    {"tag": "goodbye",
     "patterns": ["Bye", "See you later", "Goodbye", "Nice chatting to you, bye", "Till next time"],
     "responses": ["See you!", "Have a nice day", "Bye! Come back again soon."],
     "context": [""]
    },
    {"tag": "thanks",
     "patterns": ["Thanks", "Thank you", "That's helpful", "Awesome, thanks", "Thanks for helping me"],
     "responses": ["Happy to help!", "Any time!", "My pleasure"],
     "context": [""]
    },
    {"tag": "noanswer",
     "patterns": [],
     "responses": ["Sorry, can't understand you", "Please give me more info", "Not sure I understand"],
     "context": [""]
    },
    {"tag": "options",
     "patterns": ["How you could help me?", "What you can do?", "What help you provide?", "How you can be helpful?", "What support is offered"],
     "responses": ["I can guide you through Adverse drug reaction list, Blood pressure tracking, Hospitals and Pharmacies", "Offering support for Adverse drug reaction, Blood pressure, Hospitals and Pharmacies"],
     "context": [""]
    },
    {"tag": "adverse_drug",
     "patterns": ["How to check Adverse drug reaction?", "Open adverse drugs module", "Give me a list of drugs causing adverse behavior", "List all drugs suitable for patient with adverse reaction", "Which drugs dont have adverse reaction?"],
     "responses": ["Navigating to Adverse drug reaction module"],
     "context": [""]
    },
    {"tag": "blood_pressure",
     "patterns": ["Open blood pressure module", "Task related to blood pressure", "Blood pressure data entry", "I want to log blood pressure results", "Blood pressure data management"],
     "responses": ["Navigating to Blood Pressure module"],
     "context": [""]
    },
    {"tag": "blood_pressure_search",
     "patterns": ["I want to search for blood pressure result history", "Blood pressure for patient", "Load patient blood pressure result", "Show blood pressure results for patient", "Find blood pressure results by ID"],
     "responses": ["Please provide Patient ID", "Patient ID?"],
     "context": ["search_blood_pressure_by_patient_id"]
    },
    {"tag": "search_blood_pressure_by_patient_id",
     "patterns": [],
     "responses": ["Loading Blood pressure result for Patient"],
     "context": [""]
    },
    {"tag": "pharmacy_search",
     "patterns": ["Find me a pharmacy", "Find pharmacy", "List of pharmacies nearby", "Locate pharmacy", "Search pharmacy"],
     "responses": ["Please provide pharmacy name"],
     "context": ["search_pharmacy_by_name"]
    },
    {"tag": "search_pharmacy_by_name",
     "patterns": [],
     "responses": ["Loading pharmacy details"],
     "context": [""]
    },
    {"tag": "hospital_search",
     "patterns": ["Lookup for hospital", "Searching for hospital to transfer patient", "I want to search hospital data", "Hospital lookup for patient", "Looking up hospital details"],
     "responses": ["Please provide hospital name or location"],
     "context": ["search_hospital_by_params"]
    },
    {"tag": "search_hospital_by_params",
     "patterns": [],
     "responses": ["Please provide hospital type"],
     "context": ["search_hospital_by_type"]
    },
    {"tag": "search_hospital_by_type",
     "patterns": [],
     "responses": ["Loading hospital details"],
     "context": [""]
    }
]
}
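Note the context fields: blood_pressure_search, pharmacy_search, and hospital_search each point at a follow-up tag whose own patterns list is empty, so those tags are reachable only via context; chatgui.py above never reads context, so the chaining is latent in the data. A minimal consistency check one might run over the file (check_intents is a hypothetical helper, not part of the commit):

import json

def check_intents(path='intents.json'):
    data = json.loads(open(path).read())
    tags = {i['tag'] for i in data['intents']}
    for intent in data['intents']:
        # every non-empty context value must name an existing tag
        for ctx in intent.get('context', []):
            assert ctx == '' or ctx in tags, f"unknown context {ctx!r} in {intent['tag']!r}"
    print(f"{len(tags)} tags, contexts consistent")

check_intents()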

train_chatbot.py

Lines changed: 96 additions & 0 deletions
@@ -0,0 +1,96 @@
import nltk
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import json
import pickle

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD
import random

words = []
classes = []
documents = []
ignore_words = ['?', '!']
data_file = open('intents.json').read()
intents = json.loads(data_file)


for intent in intents['intents']:
    for pattern in intent['patterns']:

        # take each pattern and tokenize it
        w = nltk.word_tokenize(pattern)
        words.extend(w)
        # adding documents
        documents.append((w, intent['tag']))

        # adding classes to our class list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])

words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]
words = sorted(list(set(words)))

classes = sorted(list(set(classes)))

print(len(documents), "documents")
print(len(classes), "classes", classes)
print(len(words), "unique lemmatized words", words)


pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))

# initializing training data
training = []
output_empty = [0] * len(classes)
for doc in documents:
    # initializing bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # lemmatize each word - create base word, in attempt to represent related words
    pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words]
    # create our bag-of-words array with 1 if a word match is found in the current pattern
    for w in words:
        bag.append(1 if w in pattern_words else 0)

    # output is a '0' for each tag and '1' for the current tag (for each pattern)
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1

    training.append([bag, output_row])
# shuffle our features and turn into np.array
random.shuffle(training)
# dtype=object because bag and output_row have different lengths (ragged rows)
training = np.array(training, dtype=object)
# create train and test lists. X - patterns, Y - intents
train_x = list(training[:, 0])
train_y = list(training[:, 1])
print("Training data created")


# Create model - 3 layers. First layer 128 neurons, second layer 64 neurons, and the 3rd (output)
# layer contains a number of neurons equal to the number of intents, to predict the intent with softmax
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

# Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results
# for this model (newer Keras spells lr as learning_rate and has dropped decay)
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

# fitting and saving the model
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.h5')  # save() takes no history argument; hist is kept only for inspection

print("model created")
