Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

using custom data but gives run time error . Please suggest #357

Open
rajeev12111 opened this issue Oct 7, 2023 · 12 comments
Open

using custom data but gives run time error . Please suggest #357

rajeev12111 opened this issue Oct 7, 2023 · 12 comments

Comments

@rajeev12111
Copy link


RuntimeError Traceback (most recent call last)
Cell In[18], line 15
10 config.verbose = False # If verbose == True, PyABSA will output the model strcture and seversal processed data examples
11 config.notice = (
12 "This is an training example for aspect term extraction" # for memos usage
13 )
---> 15 trainer = ATEPC.ATEPCTrainer(
16 config=config,
17 dataset=my_dataset,
18 from_checkpoint="english", # if you want to resume training from our pretrained checkpoints, you can pass the checkpoint name here
19 auto_device=DeviceTypeOption.AUTO, # use cuda if available
20 checkpoint_save_mode=ModelSaveOption.SAVE_MODEL_STATE_DICT, # save state dict only instead of the whole model
21 load_aug=False, # there are some augmentation dataset for integrated datasets, you use them by setting load_aug=True to improve performance
22 )

File /opt/conda/lib/python3.10/site-packages/pyabsa/tasks/AspectTermExtraction/trainer/atepc_trainer.py:69, in ATEPCTrainer.init(self, config, dataset, from_checkpoint, checkpoint_save_mode, auto_device, path_to_save, load_aug)
64 self.config.task_code = TaskCodeOption.Aspect_Term_Extraction_and_Classification
65 self.config.task_name = TaskNameOption().get(
66 TaskCodeOption.Aspect_Term_Extraction_and_Classification
67 )
---> 69 self._run()

File /opt/conda/lib/python3.10/site-packages/pyabsa/framework/trainer_class/trainer_template.py:241, in Trainer._run(self)
239 self.config.seed = s
240 if self.config.checkpoint_save_mode:
--> 241 model_path.append(self.training_instructor(self.config).run())
242 else:
243 # always return the last trained model if you don't save trained model
244 model = self.inference_model_class(
245 checkpoint=self.training_instructor(self.config).run()
246 )

File /opt/conda/lib/python3.10/site-packages/pyabsa/tasks/AspectTermExtraction/instructor/atepc_instructor.py:794, in ATEPCTrainingInstructor.run(self)
793 def run(self):
--> 794 return self._train(criterion=None)

File /opt/conda/lib/python3.10/site-packages/pyabsa/framework/instructor_class/instructor_template.py:357, in BaseTrainingInstructor._train(self, criterion)
354 pass
356 # Resume training from a previously trained model
--> 357 self._resume_from_checkpoint()
359 # Initialize the learning rate scheduler if warmup_step is specified
360 if self.config.warmup_step >= 0:

File /opt/conda/lib/python3.10/site-packages/pyabsa/framework/instructor_class/instructor_template.py:455, in BaseTrainingInstructor._resume_from_checkpoint(self)
451 self.model.module.load_state_dict(
452 torch.load(state_dict_path[0])
453 )
454 else:
--> 455 self.model.load_state_dict(
456 torch.load(
457 state_dict_path[0], map_location=self.config.device
458 )
459 )
460 self.model.config = self.config
461 self.model.to(self.config.device)

File /opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py:2041, in Module.load_state_dict(self, state_dict, strict)
2036 error_msgs.insert(
2037 0, 'Missing key(s) in state_dict: {}. '.format(
2038 ', '.join('"{}"'.format(k) for k in missing_keys)))
2040 if len(error_msgs) > 0:
-> 2041 raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
2042 self.class.name, "\n\t".join(error_msgs)))
2043 return _IncompatibleKeys(missing_keys, unexpected_keys)

RuntimeError: Error(s) in loading state_dict for FAST_LCF_ATEPC:
Unexpected key(s) in state_dict: "bert4global.embeddings.position_ids".

@rajeev12111
Copy link
Author

from pyabsa import DatasetItem
my_dataset = DatasetItem("/kaggle/working/integrated_datasets/atepc_datasets/custom.apc.train.txt.atepc")

from pyabsa import ModelSaveOption, DeviceTypeOption
import warnings

warnings.filterwarnings("ignore")

config.batch_size = 16
config.patience = 2
config.log_step = -1
config.seed = [1, 2, 3]
config.verbose = False # If verbose == True, PyABSA will output the model strcture and seversal processed data examples
config.notice = (
"This is an training example for aspect term extraction" # for memos usage
)

trainer = ATEPC.ATEPCTrainer(
config=config,
dataset=my_dataset,
from_checkpoint="english", # if you want to resume training from our pretrained checkpoints, you can pass the checkpoint name here
auto_device=DeviceTypeOption.AUTO, # use cuda if available
checkpoint_save_mode=ModelSaveOption.SAVE_MODEL_STATE_DICT, # save state dict only instead of the whole model
load_aug=False, # there are some augmentation dataset for integrated datasets, you use them by setting load_aug=True to improve performance
)

Please suggest how to correct and debug this code.

@yangheng95
Copy link
Owner

try `pip install pyabsa -U` for V2, and `pip install "pyabsa<2.0" -U` for V1

@rajeev12111
Copy link
Author

[2023-10-08 12:23:53] (2.3.4) Set Model Device: cuda:0
[2023-10-08 12:23:53] (2.3.4) Device Name: Tesla T4
Downloading (…)lve/main/config.json: 100%
579/579 [00:00<00:00, 45.9kB/s]
2023-10-08 12:23:53,894 INFO: PyABSA version: 2.3.4
2023-10-08 12:23:53,897 INFO: Transformers version: 4.33.0
2023-10-08 12:23:53,898 INFO: Torch version: 2.0.0+cuda11.8
2023-10-08 12:23:53,899 INFO: Device: Tesla T4
2023-10-08 12:23:53,900 INFO: custom.apc.train.txt.atepc in the trainer is not a exact path, will search dataset in current working directory
2023-10-08 12:23:53,901 INFO: Warning! auto_evaluate=True, however cannot find test set using for evaluating!
Downloading (…)okenizer_config.json: 100%
52.0/52.0 [00:00<00:00, 3.95kB/s]
Downloading spm.model: 100%
2.46M/2.46M [00:00<00:00, 43.8MB/s]
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
convert examples to features: 100%|██████████| 100/100 [00:00<00:00, 741.28it/s]
2023-10-08 12:23:56,319 INFO: Dataset Label Details: {'NEGATIVE': 26, 'POSITIVE': 63, 'NEUTRAL': 11, 'Sum': 100}

Downloading pytorch_model.bin: 100%
371M/371M [00:01<00:00, 309MB/s]
2023-10-08 12:24:00,702 INFO: Save cache dataset to fast_lcf_atepc.custom.apc.train.txt.atepc.dataset.99144ac14eaf5168ddd19ec0b56a2cbcfccfe564762e5fb2e81dac19326b747d.cache
2023-10-08 12:24:07,532 INFO: cuda memory allocated:764963840
2023-10-08 12:24:07,533 INFO: ABSADatasetsVersion:None --> Calling Count:0
2023-10-08 12:24:07,535 INFO: IOB_label_to_index:{'B-ASP': 1, 'I-ASP': 2, 'O': 3, '[CLS]': 4, '[SEP]': 5} --> Calling Count:1
2023-10-08 12:24:07,536 INFO: MV:<metric_visualizer.metric_visualizer.MetricVisualizer object at 0x7f89f822bb80> --> Calling Count:0
2023-10-08 12:24:07,537 INFO: PyABSAVersion:2.3.4 --> Calling Count:1
2023-10-08 12:24:07,538 INFO: SRD:3 --> Calling Count:200
2023-10-08 12:24:07,539 INFO: TorchVersion:2.0.0+cuda11.8 --> Calling Count:1
2023-10-08 12:24:07,540 INFO: TransformersVersion:4.33.0 --> Calling Count:1
2023-10-08 12:24:07,541 INFO: auto_device:True --> Calling Count:2
2023-10-08 12:24:07,542 INFO: batch_size:16 --> Calling Count:3
2023-10-08 12:24:07,543 INFO: cache_dataset:True --> Calling Count:1
2023-10-08 12:24:07,544 INFO: checkpoint_save_mode:1 --> Calling Count:4
2023-10-08 12:24:07,545 INFO: cross_validate_fold:-1 --> Calling Count:0
2023-10-08 12:24:07,546 INFO: dataset_file:{'train': ['/kaggle/working/integrated_datasets/atepc_datasets/custom.apc.train.txt.atepc'], 'test': [], 'valid': []} --> Calling Count:5
2023-10-08 12:24:07,547 INFO: dataset_name:custom.apc.train.txt.atepc --> Calling Count:3
2023-10-08 12:24:07,548 INFO: device:cuda:0 --> Calling Count:4
2023-10-08 12:24:07,548 INFO: device_name:Tesla T4 --> Calling Count:1
2023-10-08 12:24:07,549 INFO: dropout:0.5 --> Calling Count:1
2023-10-08 12:24:07,550 INFO: dynamic_truncate:True --> Calling Count:200
2023-10-08 12:24:07,551 INFO: embed_dim:768 --> Calling Count:0
2023-10-08 12:24:07,551 INFO: evaluate_begin:0 --> Calling Count:0
2023-10-08 12:24:07,552 INFO: from_checkpoint:english --> Calling Count:0
2023-10-08 12:24:07,553 INFO: gradient_accumulation_steps:1 --> Calling Count:3
2023-10-08 12:24:07,554 INFO: hidden_dim:768 --> Calling Count:6
2023-10-08 12:24:07,554 INFO: index_to_IOB_label:{1: 'B-ASP', 2: 'I-ASP', 3: 'O', 4: '[CLS]', 5: '[SEP]'} --> Calling Count:0
2023-10-08 12:24:07,555 INFO: index_to_label:{0: 'NEGATIVE', 1: 'NEUTRAL', 2: 'POSITIVE'} --> Calling Count:1
2023-10-08 12:24:07,556 INFO: inference_model:None --> Calling Count:0
2023-10-08 12:24:07,557 INFO: initializer:xavier_uniform_ --> Calling Count:0
2023-10-08 12:24:07,557 INFO: l2reg:1e-05 --> Calling Count:2
2023-10-08 12:24:07,558 INFO: label_list:['B-ASP', 'I-ASP', 'O', '[CLS]', '[SEP]'] --> Calling Count:1
2023-10-08 12:24:07,559 INFO: label_to_index:{'NEGATIVE': 0, 'NEUTRAL': 1, 'POSITIVE': 2} --> Calling Count:0
2023-10-08 12:24:07,559 INFO: lcf:cdw --> Calling Count:0
2023-10-08 12:24:07,560 INFO: learning_rate:2e-05 --> Calling Count:1
2023-10-08 12:24:07,561 INFO: load_aug:False --> Calling Count:1
2023-10-08 12:24:07,562 INFO: log_step:-1 --> Calling Count:0
2023-10-08 12:24:07,563 INFO: logger:<Logger fast_lcf_atepc (INFO)> --> Calling Count:8
2023-10-08 12:24:07,563 INFO: max_seq_len:80 --> Calling Count:701
2023-10-08 12:24:07,564 INFO: model:<class 'pyabsa.tasks.AspectTermExtraction.models.lcf.fast_lcf_atepc.FAST_LCF_ATEPC'> --> Calling Count:5
2023-10-08 12:24:07,565 INFO: model_name:fast_lcf_atepc --> Calling Count:102
2023-10-08 12:24:07,566 INFO: model_path_to_save:checkpoints --> Calling Count:3
2023-10-08 12:24:07,566 INFO: notice:This is an training example for aspect term extraction --> Calling Count:0
2023-10-08 12:24:07,567 INFO: num_epoch:10 --> Calling Count:1
2023-10-08 12:24:07,568 INFO: num_labels:6 --> Calling Count:2
2023-10-08 12:24:07,569 INFO: optimizer:adamw --> Calling Count:2
2023-10-08 12:24:07,569 INFO: output_dim:3 --> Calling Count:1
2023-10-08 12:24:07,570 INFO: overwrite_cache:False --> Calling Count:0
2023-10-08 12:24:07,571 INFO: path_to_save:None --> Calling Count:1
2023-10-08 12:24:07,572 INFO: patience:2 --> Calling Count:0
2023-10-08 12:24:07,573 INFO: pretrained_bert:microsoft/deberta-v3-base --> Calling Count:5
2023-10-08 12:24:07,573 INFO: save_mode:1 --> Calling Count:0
2023-10-08 12:24:07,574 INFO: seed:1 --> Calling Count:6
2023-10-08 12:24:07,575 INFO: sep_indices:2 --> Calling Count:0
2023-10-08 12:24:07,576 INFO: show_metric:False --> Calling Count:0
2023-10-08 12:24:07,576 INFO: spacy_model:en_core_web_sm --> Calling Count:1
2023-10-08 12:24:07,577 INFO: srd_alignment:True --> Calling Count:0
2023-10-08 12:24:07,578 INFO: task_code:ATEPC --> Calling Count:1
2023-10-08 12:24:07,579 INFO: task_name:Aspect Term Extraction and Polarity Classification --> Calling Count:0
2023-10-08 12:24:07,579 INFO: use_amp:False --> Calling Count:1
2023-10-08 12:24:07,580 INFO: use_bert_spc:True --> Calling Count:0
2023-10-08 12:24:07,581 INFO: use_syntax_based_SRD:False --> Calling Count:100
2023-10-08 12:24:07,582 INFO: verbose:False --> Calling Count:0
2023-10-08 12:24:07,582 INFO: warmup_step:-1 --> Calling Count:0
2023-10-08 12:24:07,583 INFO: window:lr --> Calling Count:0
2023-10-08 12:24:07,587 INFO: cuda memory allocated:764963840
2023-10-08 12:24:07,588 INFO: ABSADatasetsVersion:None --> Calling Count:0
2023-10-08 12:24:07,589 INFO: IOB_label_to_index:{'B-ASP': 1, 'I-ASP': 2, 'O': 3, '[CLS]': 4, '[SEP]': 5} --> Calling Count:1
2023-10-08 12:24:07,590 INFO: MV:<metric_visualizer.metric_visualizer.MetricVisualizer object at 0x7f89f822bb80> --> Calling Count:0
2023-10-08 12:24:07,591 INFO: PyABSAVersion:2.3.4 --> Calling Count:1
2023-10-08 12:24:07,591 INFO: SRD:3 --> Calling Count:200
2023-10-08 12:24:07,592 INFO: TorchVersion:2.0.0+cuda11.8 --> Calling Count:1
2023-10-08 12:24:07,593 INFO: TransformersVersion:4.33.0 --> Calling Count:1
2023-10-08 12:24:07,594 INFO: auto_device:True --> Calling Count:3
2023-10-08 12:24:07,594 INFO: batch_size:16 --> Calling Count:3
2023-10-08 12:24:07,595 INFO: cache_dataset:True --> Calling Count:1
2023-10-08 12:24:07,596 INFO: checkpoint_save_mode:1 --> Calling Count:4
2023-10-08 12:24:07,597 INFO: cross_validate_fold:-1 --> Calling Count:1
2023-10-08 12:24:07,597 INFO: dataset_file:{'train': ['/kaggle/working/integrated_datasets/atepc_datasets/custom.apc.train.txt.atepc'], 'test': [], 'valid': []} --> Calling Count:5
2023-10-08 12:24:07,598 INFO: dataset_name:custom.apc.train.txt.atepc --> Calling Count:3
2023-10-08 12:24:07,599 INFO: device:cuda:0 --> Calling Count:8
2023-10-08 12:24:07,600 INFO: device_name:Tesla T4 --> Calling Count:1
2023-10-08 12:24:07,600 INFO: dropout:0.5 --> Calling Count:1
2023-10-08 12:24:07,601 INFO: dynamic_truncate:True --> Calling Count:200
2023-10-08 12:24:07,602 INFO: embed_dim:768 --> Calling Count:0
2023-10-08 12:24:07,603 INFO: evaluate_begin:0 --> Calling Count:0
2023-10-08 12:24:07,603 INFO: from_checkpoint:english --> Calling Count:0
2023-10-08 12:24:07,604 INFO: gradient_accumulation_steps:1 --> Calling Count:3
2023-10-08 12:24:07,605 INFO: hidden_dim:768 --> Calling Count:6
2023-10-08 12:24:07,606 INFO: index_to_IOB_label:{1: 'B-ASP', 2: 'I-ASP', 3: 'O', 4: '[CLS]', 5: '[SEP]'} --> Calling Count:0
2023-10-08 12:24:07,606 INFO: index_to_label:{0: 'NEGATIVE', 1: 'NEUTRAL', 2: 'POSITIVE'} --> Calling Count:1
2023-10-08 12:24:07,607 INFO: inference_model:None --> Calling Count:0
2023-10-08 12:24:07,608 INFO: initializer:xavier_uniform
--> Calling Count:0
2023-10-08 12:24:07,609 INFO: l2reg:1e-05 --> Calling Count:2
2023-10-08 12:24:07,609 INFO: label_list:['B-ASP', 'I-ASP', 'O', '[CLS]', '[SEP]'] --> Calling Count:1
2023-10-08 12:24:07,610 INFO: label_to_index:{'NEGATIVE': 0, 'NEUTRAL': 1, 'POSITIVE': 2} --> Calling Count:0
2023-10-08 12:24:07,611 INFO: lcf:cdw --> Calling Count:0
2023-10-08 12:24:07,612 INFO: learning_rate:2e-05 --> Calling Count:1
2023-10-08 12:24:07,612 INFO: load_aug:False --> Calling Count:1
2023-10-08 12:24:07,613 INFO: log_step:-1 --> Calling Count:0
2023-10-08 12:24:07,614 INFO: logger:<_Logger fast_lcf_atepc (INFO)> --> Calling Count:8
2023-10-08 12:24:07,615 INFO: max_seq_len:80 --> Calling Count:701
2023-10-08 12:24:07,615 INFO: model:<class 'pyabsa.tasks.AspectTermExtraction.models.lcf.fast_lcf_atepc.FAST_LCF_ATEPC'> --> Calling Count:5
2023-10-08 12:24:07,616 INFO: model_name:fast_lcf_atepc --> Calling Count:102
2023-10-08 12:24:07,617 INFO: model_path_to_save:checkpoints --> Calling Count:3
2023-10-08 12:24:07,618 INFO: notice:This is an training example for aspect term extraction --> Calling Count:0
2023-10-08 12:24:07,619 INFO: num_epoch:10 --> Calling Count:1
2023-10-08 12:24:07,620 INFO: num_labels:6 --> Calling Count:2
2023-10-08 12:24:07,621 INFO: optimizer:adamw --> Calling Count:2
2023-10-08 12:24:07,621 INFO: output_dim:3 --> Calling Count:1
2023-10-08 12:24:07,622 INFO: overwrite_cache:False --> Calling Count:0
2023-10-08 12:24:07,623 INFO: path_to_save:None --> Calling Count:1
2023-10-08 12:24:07,624 INFO: patience:2 --> Calling Count:0
2023-10-08 12:24:07,624 INFO: pretrained_bert:microsoft/deberta-v3-base --> Calling Count:5
2023-10-08 12:24:07,625 INFO: save_mode:1 --> Calling Count:0
2023-10-08 12:24:07,626 INFO: seed:1 --> Calling Count:6
2023-10-08 12:24:07,627 INFO: sep_indices:2 --> Calling Count:0
2023-10-08 12:24:07,627 INFO: show_metric:False --> Calling Count:0
2023-10-08 12:24:07,628 INFO: spacy_model:en_core_web_sm --> Calling Count:1
2023-10-08 12:24:07,629 INFO: srd_alignment:True --> Calling Count:0
2023-10-08 12:24:07,630 INFO: task_code:ATEPC --> Calling Count:1
2023-10-08 12:24:07,630 INFO: task_name:Aspect Term Extraction and Polarity Classification --> Calling Count:0
2023-10-08 12:24:07,631 INFO: tokenizer:DebertaV2TokenizerFast(name_or_path='microsoft/deberta-v3-base', vocab_size=128000, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '[CLS]', 'eos_token': '[SEP]', 'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]', 'cls_token': '[CLS]', 'mask_token': '[MASK]'}, clean_up_tokenization_spaces=True) --> Calling Count:0
2023-10-08 12:24:07,632 INFO: use_amp:False --> Calling Count:1
2023-10-08 12:24:07,633 INFO: use_bert_spc:True --> Calling Count:0
2023-10-08 12:24:07,633 INFO: use_syntax_based_SRD:False --> Calling Count:100
2023-10-08 12:24:07,634 INFO: verbose:False --> Calling Count:1
2023-10-08 12:24:07,635 INFO: warmup_step:-1 --> Calling Count:0
2023-10-08 12:24:07,636 INFO: window:lr --> Calling Count:0
[2023-10-08 12:24:07] (2.3.4) Downloading checkpoint:english
[2023-10-08 12:24:07] (2.3.4) Notice: The pretrained model are used for testing, it is recommended to train the model on your own custom datasets
Downloading checkpoint: 579MB [00:01, 333.35MB/s]
Find zipped checkpoint: ./checkpoints/ATEPC_ENGLISH_CHECKPOINT/fast_lcf_atepc_English_cdw_apcacc_82.36_apcf1_81.89_atef1_75.43.zip, unzipping

Done.
[2023-10-08 12:24:16] (2.3.4) If the auto-downloading failed, please download it via browser: https://huggingface.co/spaces/yangheng/PyABSA/resolve/main/checkpoints/English/ATEPC/fast_lcf_atepc_English_cdw_apcacc_82.36_apcf1_81.89_atef1_75.43.zip
2023-10-08 12:24:16,518 INFO: Checkpoint downloaded at: checkpoints/ATEPC_ENGLISH_CHECKPOINT/fast_lcf_atepc_English_cdw_apcacc_82.36_apcf1_81.89_atef1_75.43

RuntimeError Traceback (most recent call last)
Cell In[8], line 15
10 config.verbose = False # If verbose == True, PyABSA will output the model strcture and seversal processed data examples
11 config.notice = (
12 "This is an training example for aspect term extraction" # for memos usage
13 )
---> 15 trainer = ATEPC.ATEPCTrainer(
16 config=config,
17 dataset=my_dataset,
18 from_checkpoint="english", # if you want to resume training from our pretrained checkpoints, you can pass the checkpoint name here
19 auto_device=DeviceTypeOption.AUTO, # use cuda if available
20 checkpoint_save_mode=ModelSaveOption.SAVE_MODEL_STATE_DICT, # save state dict only instead of the whole model
21 load_aug=False, # there are some augmentation dataset for integrated datasets, you use them by setting load_aug=True to improve performance
22 )

File /opt/conda/lib/python3.10/site-packages/pyabsa/tasks/AspectTermExtraction/trainer/atepc_trainer.py:69, in ATEPCTrainer.init(self, config, dataset, from_checkpoint, checkpoint_save_mode, auto_device, path_to_save, load_aug)
64 self.config.task_code = TaskCodeOption.Aspect_Term_Extraction_and_Classification
65 self.config.task_name = TaskNameOption().get(
66 TaskCodeOption.Aspect_Term_Extraction_and_Classification
67 )
---> 69 self._run()

File /opt/conda/lib/python3.10/site-packages/pyabsa/framework/trainer_class/trainer_template.py:241, in Trainer._run(self)
239 self.config.seed = s
240 if self.config.checkpoint_save_mode:
--> 241 model_path.append(self.training_instructor(self.config).run())
242 else:
243 # always return the last trained model if you don't save trained model
244 model = self.inference_model_class(
245 checkpoint=self.training_instructor(self.config).run()
246 )

File /opt/conda/lib/python3.10/site-packages/pyabsa/tasks/AspectTermExtraction/instructor/atepc_instructor.py:794, in ATEPCTrainingInstructor.run(self)
793 def run(self):
--> 794 return self._train(criterion=None)

File /opt/conda/lib/python3.10/site-packages/pyabsa/framework/instructor_class/instructor_template.py:357, in BaseTrainingInstructor._train(self, criterion)
354 pass
356 # Resume training from a previously trained model
--> 357 self._resume_from_checkpoint()
359 # Initialize the learning rate scheduler if warmup_step is specified
360 if self.config.warmup_step >= 0:

File /opt/conda/lib/python3.10/site-packages/pyabsa/framework/instructor_class/instructor_template.py:455, in BaseTrainingInstructor._resume_from_checkpoint(self)
451 self.model.module.load_state_dict(
452 torch.load(state_dict_path[0])
453 )
454 else:
--> 455 self.model.load_state_dict(
456 torch.load(
457 state_dict_path[0], map_location=self.config.device
458 )
459 )
460 self.model.config = self.config
461 self.model.to(self.config.device)

File /opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py:2041, in Module.load_state_dict(self, state_dict, strict)
2036 error_msgs.insert(
2037 0, 'Missing key(s) in state_dict: {}. '.format(
2038 ', '.join('"{}"'.format(k) for k in missing_keys)))
2040 if len(error_msgs) > 0:
-> 2041 raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
2042 self.class.name, "\n\t".join(error_msgs)))
2043 return _IncompatibleKeys(missing_keys, unexpected_keys)

RuntimeError: Error(s) in loading state_dict for FAST_LCF_ATEPC:
Unexpected key(s) in state_dict: "bert4global.embeddings.position_ids". I have installed pyabsa with pip install pyabsa -U for V2, but it still shows this type of error. I am stuck now. Thank you.

@yangheng95
Copy link
Owner

try pip install pyabsa==2.3.4rc0

@rajeev12111
Copy link
Author

Thank you so much for the quick response. I am not able to run fine-tuning training for my own model, and I am stuck now. Even after pip install pyabsa==2.3.4rc0 I am not able to debug the error. Please suggest a working script for fine-tuning training on a custom model.

Thank you

@rajeev12111
Copy link
Author

Please suggest how to debug this.

@yangheng95
Copy link
Owner

Thank you so much for quick response . I am not able to Fine-Tuning Training for Your Own Model . I stuck now. I am not able to debug the error from this pip install pyabsa==2.3.4rc0 . Please suggest the script for Fine-Tuning Training for Your Own Model.

Thank you

I cannot fully understand your reply — does the error look the same as the previous one?

@rajeev12111
Copy link
Author

try pip install pyabsa==2.3.4rc0

Thank you so much for quick response . I am not able to Fine-Tuning Training for Your Own Model . I stuck now. I am not able to debug the error from this pip install pyabsa==2.3.4rc0 . Please suggest the script for Fine-Tuning Training for Your Own Model.
Thank you

I cannot fully understand your reply — does the error look the same as the previous one?

Yes, the error seems the same as the previous one.

@yangheng95
Copy link
Owner

Sorry, I cannot reproduce the error. Did you restart the kernel after the update?

@rajeev12111
Copy link
Author

my code script here
from pyabsa import DatasetItem
my_dataset = DatasetItem("/kaggle/working/integrated_datasets/atepc_datasets/custom.apc.train.txt.atepc")

from pyabsa import ModelSaveOption, DeviceTypeOption
import warnings

warnings.filterwarnings("ignore")

config.batch_size = 16
config.patience = 2
config.log_step = -1
config.seed = [1, 2, 3]
config.verbose = False # If verbose == True, PyABSA will output the model strcture and seversal processed data examples
config.notice = (
"This is an training example for aspect term extraction" # for memos usage
)

trainer = ATEPC.ATEPCTrainer(
config=config,
dataset=my_dataset,
from_checkpoint="english", # if you want to resume training from our pretrained checkpoints, you can pass the checkpoint name here
auto_device=DeviceTypeOption.AUTO, # use cuda if available
checkpoint_save_mode=ModelSaveOption.SAVE_MODEL_STATE_DICT, # save state dict only instead of the whole model
load_aug=False, # there are some augmentation dataset for integrated datasets, you use them by setting load_aug=True to improve performance
)

[2023-10-09 09:22:34] (2.3.4rc0) Set Model Device: cuda:0
[2023-10-09 09:22:34] (2.3.4rc0) Device Name: Tesla T4
Downloading (…)lve/main/config.json: 100%
579/579 [00:00<00:00, 16.9kB/s]
2023-10-09 09:22:35,125 INFO: PyABSA version: 2.3.4rc0
2023-10-09 09:22:35,130 INFO: Transformers version: 4.33.0
2023-10-09 09:22:35,131 INFO: Torch version: 2.0.0+cuda11.8
2023-10-09 09:22:35,132 INFO: Device: Tesla T4
2023-10-09 09:22:35,133 INFO: custom.apc.train.txt.atepc in the trainer is not a exact path, will search dataset in current working directory
2023-10-09 09:22:35,134 INFO: Warning! auto_evaluate=True, however cannot find test set using for evaluating!
Downloading (…)okenizer_config.json: 100%
52.0/52.0 [00:00<00:00, 1.39kB/s]
Downloading spm.model: 100%
2.46M/2.46M [00:00<00:00, 24.5MB/s]
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
convert examples to features: 100%|██████████| 100/100 [00:00<00:00, 763.36it/s]
2023-10-09 09:22:37,566 INFO: Dataset Label Details: {'NEUTRAL': 11, 'POSITIVE': 63, 'NEGATIVE': 26, 'Sum': 100}

Downloading pytorch_model.bin: 100%
371M/371M [00:01<00:00, 218MB/s]
2023-10-09 09:22:41,963 INFO: Save cache dataset to fast_lcf_atepc.custom.apc.train.txt.atepc.dataset.675d8f23577290a43bfabc0f4f088e6703da03520d63e199a6202e1497c62f1b.cache
2023-10-09 09:22:47,111 INFO: cuda memory allocated:764963840
2023-10-09 09:22:47,113 INFO: ABSADatasetsVersion:None --> Calling Count:0
2023-10-09 09:22:47,114 INFO: IOB_label_to_index:{'B-ASP': 1, 'I-ASP': 2, 'O': 3, '[CLS]': 4, '[SEP]': 5} --> Calling Count:1
2023-10-09 09:22:47,115 INFO: MV:<metric_visualizer.metric_visualizer.MetricVisualizer object at 0x7ee80f108cd0> --> Calling Count:0
2023-10-09 09:22:47,116 INFO: PyABSAVersion:2.3.4rc0 --> Calling Count:1
2023-10-09 09:22:47,118 INFO: SRD:3 --> Calling Count:200
2023-10-09 09:22:47,119 INFO: TorchVersion:2.0.0+cuda11.8 --> Calling Count:1
2023-10-09 09:22:47,120 INFO: TransformersVersion:4.33.0 --> Calling Count:1
2023-10-09 09:22:47,121 INFO: auto_device:True --> Calling Count:2
2023-10-09 09:22:47,122 INFO: batch_size:16 --> Calling Count:3
2023-10-09 09:22:47,123 INFO: cache_dataset:True --> Calling Count:1
2023-10-09 09:22:47,124 INFO: checkpoint_save_mode:1 --> Calling Count:4
2023-10-09 09:22:47,125 INFO: cross_validate_fold:-1 --> Calling Count:0
2023-10-09 09:22:47,126 INFO: dataset_file:{'train': ['/kaggle/working/integrated_datasets/atepc_datasets/custom.apc.train.txt.atepc'], 'test': [], 'valid': []} --> Calling Count:5
2023-10-09 09:22:47,127 INFO: dataset_name:custom.apc.train.txt.atepc --> Calling Count:3
2023-10-09 09:22:47,128 INFO: device:cuda:0 --> Calling Count:4
2023-10-09 09:22:47,128 INFO: device_name:Tesla T4 --> Calling Count:1
2023-10-09 09:22:47,129 INFO: dropout:0.5 --> Calling Count:1
2023-10-09 09:22:47,130 INFO: dynamic_truncate:True --> Calling Count:200
2023-10-09 09:22:47,131 INFO: embed_dim:768 --> Calling Count:0
2023-10-09 09:22:47,132 INFO: evaluate_begin:0 --> Calling Count:0
2023-10-09 09:22:47,133 INFO: from_checkpoint:english --> Calling Count:0
2023-10-09 09:22:47,134 INFO: gradient_accumulation_steps:1 --> Calling Count:3
2023-10-09 09:22:47,135 INFO: hidden_dim:768 --> Calling Count:6
2023-10-09 09:22:47,136 INFO: index_to_IOB_label:{1: 'B-ASP', 2: 'I-ASP', 3: 'O', 4: '[CLS]', 5: '[SEP]'} --> Calling Count:0
2023-10-09 09:22:47,137 INFO: index_to_label:{0: 'NEGATIVE', 1: 'NEUTRAL', 2: 'POSITIVE'} --> Calling Count:1
2023-10-09 09:22:47,137 INFO: inference_model:None --> Calling Count:0
2023-10-09 09:22:47,138 INFO: initializer:xavier_uniform_ --> Calling Count:0
2023-10-09 09:22:47,139 INFO: l2reg:1e-05 --> Calling Count:2
2023-10-09 09:22:47,140 INFO: label_list:['B-ASP', 'I-ASP', 'O', '[CLS]', '[SEP]'] --> Calling Count:1
2023-10-09 09:22:47,141 INFO: label_to_index:{'NEGATIVE': 0, 'NEUTRAL': 1, 'POSITIVE': 2} --> Calling Count:0
2023-10-09 09:22:47,142 INFO: lcf:cdw --> Calling Count:0
2023-10-09 09:22:47,143 INFO: learning_rate:2e-05 --> Calling Count:1
2023-10-09 09:22:47,144 INFO: load_aug:False --> Calling Count:1
2023-10-09 09:22:47,144 INFO: log_step:-1 --> Calling Count:0
2023-10-09 09:22:47,145 INFO: logger:<Logger fast_lcf_atepc (INFO)> --> Calling Count:8
2023-10-09 09:22:47,146 INFO: max_seq_len:80 --> Calling Count:701
2023-10-09 09:22:47,147 INFO: model:<class 'pyabsa.tasks.AspectTermExtraction.models.lcf.fast_lcf_atepc.FAST_LCF_ATEPC'> --> Calling Count:5
2023-10-09 09:22:47,148 INFO: model_name:fast_lcf_atepc --> Calling Count:102
2023-10-09 09:22:47,149 INFO: model_path_to_save:checkpoints --> Calling Count:3
2023-10-09 09:22:47,149 INFO: notice:This is an training example for aspect term extraction --> Calling Count:0
2023-10-09 09:22:47,150 INFO: num_epoch:10 --> Calling Count:1
2023-10-09 09:22:47,151 INFO: num_labels:6 --> Calling Count:2
2023-10-09 09:22:47,152 INFO: optimizer:adamw --> Calling Count:2
2023-10-09 09:22:47,153 INFO: output_dim:3 --> Calling Count:1
2023-10-09 09:22:47,154 INFO: overwrite_cache:False --> Calling Count:0
2023-10-09 09:22:47,155 INFO: path_to_save:None --> Calling Count:1
2023-10-09 09:22:47,156 INFO: patience:2 --> Calling Count:0
2023-10-09 09:22:47,157 INFO: pretrained_bert:microsoft/deberta-v3-base --> Calling Count:5
2023-10-09 09:22:47,158 INFO: save_mode:1 --> Calling Count:0
2023-10-09 09:22:47,159 INFO: seed:1 --> Calling Count:6
2023-10-09 09:22:47,160 INFO: sep_indices:2 --> Calling Count:0
2023-10-09 09:22:47,161 INFO: show_metric:False --> Calling Count:0
2023-10-09 09:22:47,161 INFO: spacy_model:en_core_web_sm --> Calling Count:1
2023-10-09 09:22:47,162 INFO: srd_alignment:True --> Calling Count:0
2023-10-09 09:22:47,163 INFO: task_code:ATEPC --> Calling Count:1
2023-10-09 09:22:47,164 INFO: task_name:Aspect Term Extraction and Polarity Classification --> Calling Count:0
2023-10-09 09:22:47,165 INFO: use_amp:False --> Calling Count:1
2023-10-09 09:22:47,166 INFO: use_bert_spc:True --> Calling Count:0
2023-10-09 09:22:47,167 INFO: use_syntax_based_SRD:False --> Calling Count:100
2023-10-09 09:22:47,168 INFO: verbose:False --> Calling Count:0
2023-10-09 09:22:47,169 INFO: warmup_step:-1 --> Calling Count:0
2023-10-09 09:22:47,170 INFO: window:lr --> Calling Count:0
2023-10-09 09:22:47,176 INFO: cuda memory allocated:764963840
2023-10-09 09:22:47,177 INFO: ABSADatasetsVersion:None --> Calling Count:0
2023-10-09 09:22:47,178 INFO: IOB_label_to_index:{'B-ASP': 1, 'I-ASP': 2, 'O': 3, '[CLS]': 4, '[SEP]': 5} --> Calling Count:1
2023-10-09 09:22:47,181 INFO: MV:<metric_visualizer.metric_visualizer.MetricVisualizer object at 0x7ee80f108cd0> --> Calling Count:0
2023-10-09 09:22:47,182 INFO: PyABSAVersion:2.3.4rc0 --> Calling Count:1
2023-10-09 09:22:47,185 INFO: SRD:3 --> Calling Count:200
2023-10-09 09:22:47,186 INFO: TorchVersion:2.0.0+cuda11.8 --> Calling Count:1
2023-10-09 09:22:47,187 INFO: TransformersVersion:4.33.0 --> Calling Count:1
2023-10-09 09:22:47,188 INFO: auto_device:True --> Calling Count:3
2023-10-09 09:22:47,189 INFO: batch_size:16 --> Calling Count:3
2023-10-09 09:22:47,192 INFO: cache_dataset:True --> Calling Count:1
2023-10-09 09:22:47,192 INFO: checkpoint_save_mode:1 --> Calling Count:4
2023-10-09 09:22:47,195 INFO: cross_validate_fold:-1 --> Calling Count:1
2023-10-09 09:22:47,196 INFO: dataset_file:{'train': ['/kaggle/working/integrated_datasets/atepc_datasets/custom.apc.train.txt.atepc'], 'test': [], 'valid': []} --> Calling Count:5
2023-10-09 09:22:47,197 INFO: dataset_name:custom.apc.train.txt.atepc --> Calling Count:3
2023-10-09 09:22:47,200 INFO: device:cuda:0 --> Calling Count:8
2023-10-09 09:22:47,201 INFO: device_name:Tesla T4 --> Calling Count:1
2023-10-09 09:22:47,202 INFO: dropout:0.5 --> Calling Count:1
2023-10-09 09:22:47,204 INFO: dynamic_truncate:True --> Calling Count:200
2023-10-09 09:22:47,205 INFO: embed_dim:768 --> Calling Count:0
2023-10-09 09:22:47,208 INFO: evaluate_begin:0 --> Calling Count:0
2023-10-09 09:22:47,209 INFO: from_checkpoint:english --> Calling Count:0
2023-10-09 09:22:47,210 INFO: gradient_accumulation_steps:1 --> Calling Count:3
2023-10-09 09:22:47,211 INFO: hidden_dim:768 --> Calling Count:6
2023-10-09 09:22:47,214 INFO: index_to_IOB_label:{1: 'B-ASP', 2: 'I-ASP', 3: 'O', 4: '[CLS]', 5: '[SEP]'} --> Calling Count:0
2023-10-09 09:22:47,215 INFO: index_to_label:{0: 'NEGATIVE', 1: 'NEUTRAL', 2: 'POSITIVE'} --> Calling Count:1
2023-10-09 09:22:47,216 INFO: inference_model:None --> Calling Count:0
2023-10-09 09:22:47,218 INFO: initializer:xavier_uniform
--> Calling Count:0
2023-10-09 09:22:47,222 INFO: l2reg:1e-05 --> Calling Count:2
2023-10-09 09:22:47,223 INFO: label_list:['B-ASP', 'I-ASP', 'O', '[CLS]', '[SEP]'] --> Calling Count:1
2023-10-09 09:22:47,224 INFO: label_to_index:{'NEGATIVE': 0, 'NEUTRAL': 1, 'POSITIVE': 2} --> Calling Count:0
2023-10-09 09:22:47,225 INFO: lcf:cdw --> Calling Count:0
2023-10-09 09:22:47,226 INFO: learning_rate:2e-05 --> Calling Count:1
2023-10-09 09:22:47,228 INFO: load_aug:False --> Calling Count:1
2023-10-09 09:22:47,229 INFO: log_step:-1 --> Calling Count:0
2023-10-09 09:22:47,232 INFO: logger:<_Logger fast_lcf_atepc (INFO)> --> Calling Count:8
2023-10-09 09:22:47,233 INFO: max_seq_len:80 --> Calling Count:701
2023-10-09 09:22:47,234 INFO: model:<class 'pyabsa.tasks.AspectTermExtraction.models.lcf.fast_lcf_atepc.FAST_LCF_ATEPC'> --> Calling Count:5
2023-10-09 09:22:47,236 INFO: model_name:fast_lcf_atepc --> Calling Count:102
2023-10-09 09:22:47,237 INFO: model_path_to_save:checkpoints --> Calling Count:3
2023-10-09 09:22:47,240 INFO: notice:This is an training example for aspect term extraction --> Calling Count:0
2023-10-09 09:22:47,241 INFO: num_epoch:10 --> Calling Count:1
2023-10-09 09:22:47,242 INFO: num_labels:6 --> Calling Count:2
2023-10-09 09:22:47,243 INFO: optimizer:adamw --> Calling Count:2
2023-10-09 09:22:47,244 INFO: output_dim:3 --> Calling Count:1
2023-10-09 09:22:47,246 INFO: overwrite_cache:False --> Calling Count:0
2023-10-09 09:22:47,247 INFO: path_to_save:None --> Calling Count:1
2023-10-09 09:22:47,250 INFO: patience:2 --> Calling Count:0
2023-10-09 09:22:47,251 INFO: pretrained_bert:microsoft/deberta-v3-base --> Calling Count:5
2023-10-09 09:22:47,252 INFO: save_mode:1 --> Calling Count:0
2023-10-09 09:22:47,253 INFO: seed:1 --> Calling Count:6
2023-10-09 09:22:47,255 INFO: sep_indices:2 --> Calling Count:0
2023-10-09 09:22:47,256 INFO: show_metric:False --> Calling Count:0
2023-10-09 09:22:47,257 INFO: spacy_model:en_core_web_sm --> Calling Count:1
2023-10-09 09:22:47,259 INFO: srd_alignment:True --> Calling Count:0
2023-10-09 09:22:47,260 INFO: task_code:ATEPC --> Calling Count:1
2023-10-09 09:22:47,261 INFO: task_name:Aspect Term Extraction and Polarity Classification --> Calling Count:0
2023-10-09 09:22:47,264 INFO: tokenizer:DebertaV2TokenizerFast(name_or_path='microsoft/deberta-v3-base', vocab_size=128000, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '[CLS]', 'eos_token': '[SEP]', 'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]', 'cls_token': '[CLS]', 'mask_token': '[MASK]'}, clean_up_tokenization_spaces=True) --> Calling Count:0
2023-10-09 09:22:47,265 INFO: use_amp:False --> Calling Count:1
2023-10-09 09:22:47,266 INFO: use_bert_spc:True --> Calling Count:0
2023-10-09 09:22:47,268 INFO: use_syntax_based_SRD:False --> Calling Count:100
2023-10-09 09:22:47,269 INFO: verbose:False --> Calling Count:1
2023-10-09 09:22:47,272 INFO: warmup_step:-1 --> Calling Count:0
2023-10-09 09:22:47,273 INFO: window:lr --> Calling Count:0
[2023-10-09 09:22:47] (2.3.4rc0) Downloading checkpoint:english
[2023-10-09 09:22:47] (2.3.4rc0) Notice: The pretrained model are used for testing, it is recommended to train the model on your own custom datasets
Downloading checkpoint: 579MB [00:08, 68.25MB/s]
Find zipped checkpoint: ./checkpoints/ATEPC_ENGLISH_CHECKPOINT/fast_lcf_atepc_English_cdw_apcacc_82.36_apcf1_81.89_atef1_75.43.zip, unzipping

Done.
[2023-10-09 09:23:03] (2.3.4rc0) If the auto-downloading failed, please download it via browser: https://huggingface.co/spaces/yangheng/PyABSA/resolve/main/checkpoints/English/ATEPC/fast_lcf_atepc_English_cdw_apcacc_82.36_apcf1_81.89_atef1_75.43.zip
2023-10-09 09:23:03,138 INFO: Checkpoint downloaded at: checkpoints/ATEPC_ENGLISH_CHECKPOINT/fast_lcf_atepc_English_cdw_apcacc_82.36_apcf1_81.89_atef1_75.43
2023-10-09 09:23:03,828 INFO: Resume trainer from Checkpoint: checkpoints/ATEPC_ENGLISH_CHECKPOINT/fast_lcf_atepc_English_cdw_apcacc_82.36_apcf1_81.89_atef1_75.43!
2023-10-09 09:23:03,829 INFO: ***** Running training for Aspect Term Extraction and Polarity Classification *****
2023-10-09 09:23:03,831 INFO: Num examples = 100
2023-10-09 09:23:03,832 INFO: Batch size = 16
2023-10-09 09:23:03,834 INFO: Num steps = 60
Epoch: 0 | Smooth Loss: 1.0063: 100%|██████████| 7/7 [00:07<00:00, 1.08s/it]
Epoch: 1 | Smooth Loss: 0.8769: 100%|██████████| 7/7 [00:05<00:00, 1.28it/s]

TypeError Traceback (most recent call last)
Cell In[10], line 15
10 config.verbose = False # If verbose == True, PyABSA will output the model strcture and seversal processed data examples
11 config.notice = (
12 "This is an training example for aspect term extraction" # for memos usage
13 )
---> 15 trainer = ATEPC.ATEPCTrainer(
16 config=config,
17 dataset=my_dataset,
18 from_checkpoint="english", # if you want to resume training from our pretrained checkpoints, you can pass the checkpoint name here
19 auto_device=DeviceTypeOption.AUTO, # use cuda if available
20 checkpoint_save_mode=ModelSaveOption.SAVE_MODEL_STATE_DICT, # save state dict only instead of the whole model
21 load_aug=False, # there are some augmentation dataset for integrated datasets, you use them by setting load_aug=True to improve performance
22 )

File /opt/conda/lib/python3.10/site-packages/pyabsa/tasks/AspectTermExtraction/trainer/atepc_trainer.py:69, in ATEPCTrainer.init(self, config, dataset, from_checkpoint, checkpoint_save_mode, auto_device, path_to_save, load_aug)
64 self.config.task_code = TaskCodeOption.Aspect_Term_Extraction_and_Classification
65 self.config.task_name = TaskNameOption().get(
66 TaskCodeOption.Aspect_Term_Extraction_and_Classification
67 )
---> 69 self._run()

File /opt/conda/lib/python3.10/site-packages/pyabsa/framework/trainer_class/trainer_template.py:241, in Trainer._run(self)
239 self.config.seed = s
240 if self.config.checkpoint_save_mode:
--> 241 model_path.append(self.training_instructor(self.config).run())
242 else:
243 # always return the last trained model if you don't save trained model
244 model = self.inference_model_class(
245 checkpoint=self.training_instructor(self.config).run()
246 )

File /opt/conda/lib/python3.10/site-packages/pyabsa/tasks/AspectTermExtraction/instructor/atepc_instructor.py:794, in ATEPCTrainingInstructor.run(self)
793 def run(self):
--> 794 return self._train(criterion=None)

File /opt/conda/lib/python3.10/site-packages/pyabsa/framework/instructor_class/instructor_template.py:374, in BaseTrainingInstructor._train(self, criterion)
371 return self._k_fold_train_and_evaluate(criterion)
372 # Train and evaluate the model if there is only one validation dataloader
373 else:
--> 374 return self._train_and_evaluate(criterion)

File /opt/conda/lib/python3.10/site-packages/pyabsa/tasks/AspectTermExtraction/instructor/atepc_instructor.py:492, in ATEPCTrainingInstructor._train_and_evaluate(self, criterion)
489 if patience == 0:
490 break
--> 492 apc_result, ate_result = self._evaluate_acc_f1(self.test_dataloader)
494 if self.valid_set and self.test_set:
495 self.config.MV.log_metric(
496 self.config.model_name
497 + "-"
(...)
502 apc_result["apc_test_acc"],
503 )

File /opt/conda/lib/python3.10/site-packages/pyabsa/tasks/AspectTermExtraction/instructor/atepc_instructor.py:604, in ATEPCTrainingInstructor._evaluate_acc_f1(self, test_dataloader, eval_ATE, eval_APC)
601 self.model.eval()
602 label_map = {i: label for i, label in enumerate(self.config.label_list, 1)}
--> 604 for i_batch, batch in enumerate(test_dataloader):
605 (
606 input_ids_spc,
607 segment_ids,
(...)
614 lcf_cdw_vec,
615 ) = batch
617 input_ids_spc = input_ids_spc.to(self.config.device)

TypeError: 'NoneType' object is not iterable.

This is the error; please help me debug it.

@yangheng95
Copy link
Owner

You just forgot to annotate a test dataset — your `dataset_file` shows `'test': []`, so the test dataloader is `None` when evaluation runs.

@rajeev12111
Copy link
Author

Thank you so much

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

2 participants