Skip to content

Commit a876a20

Browse files
committed
Adjust PyTorch DistilBERT notebook
1 parent 92d7162 commit a876a20

File tree

2 files changed

+24
-12
lines changed

2 files changed

+24
-12
lines changed

examples/notebook/pytorch/Quick_Started_Notebook_of_INC_for_Pytorch.ipynb

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -45,14 +45,15 @@
4545
"outputs": [],
4646
"source": [
4747
"# install neural-compressor from source\n",
48+
"import sys\n",
4849
"!git clone https://github.com/intel/neural-compressor.git\n",
4950
"%cd ./neural-compressor\n",
50-
"!pip install -r requirements.txt\n",
51-
"!python setup.py install\n",
51+
"!{sys.executable} -m pip install -r requirements.txt\n",
52+
"!{sys.executable} setup.py install\n",
5253
"%cd ..\n",
5354
"\n",
5455
"# or install stable basic version from pypi\n",
55-
"!pip install neural-compressor"
56+
"!{sys.executable} -m pip install neural-compressor\n"
5657
]
5758
},
5859
{
@@ -62,7 +63,7 @@
6263
"outputs": [],
6364
"source": [
6465
"# install other packages used in this notebook.\n",
65-
"!pip install torch>=1.9.0 transformers>=4.16.0 accelerate sympy numpy sentencepiece!=0.1.92 protobuf<=3.20.3 datasets>=1.1.3 scipy scikit-learn Keras-Preprocessing"
66+
"!{sys.executable} -m pip install -r requirements.txt\n"
6667
]
6768
},
6869
{
@@ -102,7 +103,7 @@
102103
" AutoTokenizer,\n",
103104
" EvalPrediction,\n",
104105
" Trainer,\n",
105-
")"
106+
")\n"
106107
]
107108
},
108109
{
@@ -114,7 +115,7 @@
114115
"task_name = 'mrpc'\n",
115116
"raw_datasets = load_dataset(\"glue\", task_name)\n",
116117
"label_list = raw_datasets[\"train\"].features[\"label\"].names\n",
117-
"num_labels = len(label_list)"
118+
"num_labels = len(label_list)\n"
118119
]
119120
},
120121
{
@@ -150,7 +151,7 @@
150151
" from_tf=False,\n",
151152
" config=config,\n",
152153
" use_auth_token=None,\n",
153-
")"
154+
")\n"
154155
]
155156
},
156157
{
@@ -179,7 +180,7 @@
179180
" result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)\n",
180181
" return result\n",
181182
"\n",
182-
"raw_datasets = raw_datasets.map(preprocess_function, batched=True)"
183+
"raw_datasets = raw_datasets.map(preprocess_function, batched=True)\n"
183184
]
184185
},
185186
{
@@ -260,7 +261,7 @@
260261
" assert False, \"No metric returned, Please check inference metric!\"\n",
261262
"\n",
262263
"def eval_func(model):\n",
263-
" return take_eval_steps(model, trainer)"
264+
" return take_eval_steps(model, trainer)\n"
264265
]
265266
},
266267
{
@@ -284,7 +285,7 @@
284285
"from neural_compressor.config import PostTrainingQuantConfig, TuningCriterion\n",
285286
"tuning_criterion = TuningCriterion(max_trials=600)\n",
286287
"conf = PostTrainingQuantConfig(approach=\"static\", tuning_criterion=tuning_criterion)\n",
287-
"q_model = fit(model, conf=conf, calib_dataloader=eval_dataloader, eval_func=eval_func)"
288+
"q_model = fit(model, conf=conf, calib_dataloader=eval_dataloader, eval_func=eval_func)\n"
288289
]
289290
},
290291
{
@@ -303,10 +304,10 @@
303304
"outputs": [],
304305
"source": [
305306
"# fp32 benchmark\n",
306-
"!python benchmark.py --input_model ./pytorch_model.bin 2>&1|tee fp32_benchmark.log\n",
307+
"!{sys.executable} benchmark.py --input_model ./pytorch_model.bin 2>&1|tee fp32_benchmark.log\n",
307308
"\n",
308309
"# int8 benchmark\n",
309-
"!python benchmark.py --input_model ./saved_results/best_model.pt 2>&1|tee int8_benchmark.log\n"
310+
"!{sys.executable} benchmark.py --input_model ./saved_results/best_model.pt 2>&1|tee int8_benchmark.log\n"
310311
]
311312
}
312313
],
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
torch>=1.9.0
2+
transformers>=4.16.0
3+
accelerate
4+
sympy
5+
numpy
6+
sentencepiece!=0.1.92
7+
protobuf<=3.20.3
8+
datasets>=1.1.3
9+
scipy
10+
scikit-learn
11+
Keras-Preprocessing

0 commit comments

Comments (0)