2
2
import tensorflow_hub as hub
3
3
from bert import tokenization , run_classifier
4
4
5
# Sentiment label names; index position matches the classifier's output class id
# (used by post_inference below).
labels = ["negative", "positive"]

# Build the BERT tokenizer once at module load: ask the pretrained TF-Hub module
# for its tokenization info (vocab file path + casing flag), resolve those
# tensors to concrete Python values, and construct the FullTokenizer from them.
# NOTE(review): assumes `import tensorflow as tf` appears earlier in the file
# (TF1-style Graph/Session API) — confirm against the full source.
with tf.Graph().as_default():
    bert_module = hub.Module("https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1")
    tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
    with tf.Session() as sess:
        # One sess.run for both tensors: yields the vocab file path (bytes/str)
        # and the lower-casing boolean the module was trained with.
        vocab_file, do_lower_case = sess.run(
            [tokenization_info["vocab_file"], tokenization_info["do_lower_case"]]
        )
        tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
14
15
15
16
16
17
def pre_inference (sample , metadata ):
@@ -20,5 +21,4 @@ def pre_inference(sample, metadata):
20
21
21
22
22
23
def post_inference(prediction, metadata):
    """Map the model's predicted class id to its sentiment label string.

    Args:
        prediction: serving-response dict; the predicted class id is read
            from prediction["response"]["labels"][0].
        metadata: request metadata (unused here; kept for the framework's
            handler signature).

    Returns:
        The corresponding entry of the module-level ``labels`` list
        ("negative" or "positive").
    """
    return labels[prediction["response"]["labels"][0]]
0 commit comments