1 parent afe3159 commit ebc8afe
examples/onnxrt/nlp/huggingface_model/language_modeling/quantization/ptq_dynamic/main.py
@@ -197,8 +197,7 @@ def main():
 
     tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path,
                                                use_fast=True,
-                                               cache_dir=args.cache_dir if args.cache_dir else None,
-                                               use_auth_token='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')
+                                               cache_dir=args.cache_dir if args.cache_dir else None)
     if args.block_size <= 0:
         args.block_size = tokenizer.max_len_single_sentence  # Our input block size will be the max possible for the model
     args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
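If the example ever does need authenticated Hugging Face Hub access (for a gated or private model), the token is better supplied at runtime than hardcoded as in the removed line. A minimal sketch of that pattern, assuming transformers' use_auth_token keyword (the same one the removed line used) and a hypothetical HF_TOKEN environment variable set by the user:

import os

from transformers import GPT2Tokenizer

# Read the token from the environment so it never lands in version control.
# "HF_TOKEN" is an assumed variable name, not something the example defines.
hf_token = os.environ.get("HF_TOKEN")

tokenizer = GPT2Tokenizer.from_pretrained(
    "gpt2",                    # stands in for args.model_name_or_path in the script
    use_fast=True,
    use_auth_token=hf_token,   # None simply falls back to anonymous access
)

With this shape, public models keep working when HF_TOKEN is unset, and no secret ever appears in the diff.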