Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 19 additions & 19 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,21 +1,21 @@
absl-py==0.7.1
astor==0.8.0
elasticsearch==7.0.2
gast==0.2.2
google-pasta==0.1.7
grpcio==1.22.0
h5py==2.9.0
absl-py==0.11.0
astor==0.8.1
elasticsearch==7.10.1
gast==0.3.3
google-pasta==0.2.0
grpcio==1.34.0
h5py==2.10.0
Keras-Applications==1.0.8
Keras-Preprocessing==1.1.0
Markdown==3.1.1
numpy==1.16.4
protobuf==3.9.1
six==1.12.0
tensorboard==1.14.0
tensorflow==1.14.0
tensorflow-estimator==1.14.0
tensorflow-hub==0.5.0
Keras-Preprocessing==1.1.2
Markdown==3.3.3
numpy==1.18.5
protobuf==3.13.0
six==1.15.0
tensorflow==2.3.1
tensorboard==2.4.0
tensorflow-hub==0.10.0
tensorflow-estimator==2.3.0
termcolor==1.1.0
urllib3==1.25.3
Werkzeug==0.15.5
wrapt==1.11.2
urllib3==1.25.11
Werkzeug==1.0.1
wrapt==1.12.1
26 changes: 6 additions & 20 deletions src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,6 @@
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

# Use tensorflow 1 behavior to match the Universal Sentence Encoder
# examples (https://tfhub.dev/google/universal-sentence-encoder/2).
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub

##### INDEXING #####
Expand Down Expand Up @@ -107,8 +104,8 @@ def handle_query():
##### EMBEDDING #####

def embed_text(text):
    """Embed a batch of strings with the Universal Sentence Encoder.

    Args:
        text: A list of strings (the TF Hub USE model accepts a batch of
            sentences; presumably callers pass plain ``str`` items — confirm).

    Returns:
        A list of plain Python lists of floats, one 512-dim vector per input
        string, suitable for indexing as Elasticsearch dense_vector fields.
    """
    # TF2 / hub.load style: the loaded model is directly callable and returns
    # an EagerTensor batch — no session/placeholder needed.
    vectors = model(text)
    # Convert each eager tensor row to a JSON-serializable Python list.
    return [vector.numpy().tolist() for vector in vectors]

##### MAIN SCRIPT #####

Expand All @@ -124,24 +121,13 @@ def embed_text(text):
# NOTE(review): GPU_LIMIT was used by the removed TF1 ConfigProto/session
# setup and is now unused after the TF2 migration. Kept for backward
# compatibility of the module namespace; wire it into
# tf.config.set_logical_device_configuration if GPU memory capping is still
# desired — TODO confirm and then remove or re-apply.
GPU_LIMIT = 0.5

print("Downloading pre-trained embeddings from tensorflow hub...")
# TF2-style loading: hub.load returns a callable SavedModel (USE v4).
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
model = hub.load(module_url)
print("module %s loaded" % module_url)

# Default constructor connects to localhost:9200 — assumes a local
# Elasticsearch instance is running; verify deployment config.
client = Elasticsearch()

index_data()
run_query_loop()

print("Done.")