diff --git a/examples/run_coqa_at.py b/examples/run_coqa_at.py
new file mode 100644
index 00000000000000..c34edb71307bf8
--- /dev/null
+++ b/examples/run_coqa_at.py
@@ -0,0 +1,584 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Finetuning the library models for question-answering on CoQA (DistilBERT, Bert, XLM, XLNet)."""
+
+from __future__ import absolute_import, division, print_function
+
+import argparse
+import logging
+import os
+import random
+import glob
+
+import numpy as np
+import torch
+from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
+                              TensorDataset)
+from torch.utils.data.distributed import DistributedSampler
+
+try:
+    from torch.utils.tensorboard import SummaryWriter
+except ImportError:
+    from tensorboardX import SummaryWriter
+
+from tqdm import tqdm, trange
+
+from transformers import (WEIGHTS_NAME, BertConfig, BertForQuestionAnswering, BertTokenizer,
+                          RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer,
+                          XLMConfig, XLMForQuestionAnswering, XLMTokenizer,
+                          XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer,
+                          DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer)
+
+from transformers import AdamW, WarmupLinearSchedule
+
+from utils_coqa import (read_coqa_examples, convert_examples_to_features,
+                        RawResult, write_predictions, write_predictions_v2,
+                        RawResultExtended, write_predictions_extended)
+
+# The following import is the official CoQA evaluation script (2.0).
+# You can remove it from the dependencies if you are using this script outside of the library +# We've added it here for automated tests (see examples/test_examples.py file) +from utils_coqa_evaluate import EVAL_OPTS, main as evaluate_on_coqa + +logger = logging.getLogger(__name__) + +ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ + for conf in (BertConfig, RobertaConfig, XLNetConfig, XLMConfig, DistilBertConfig)), ()) + +MODEL_CLASSES = { + 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer), + 'roberta': (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer), + 'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), + 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), + 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) +} + +def set_seed(args): + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if args.n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + +def to_list(tensor): + return tensor.detach().cpu().tolist() + +def train(args, train_dataset, model, tokenizer): + """ Train the model """ + if args.local_rank in [-1, 0]: + tb_writer = SummaryWriter() + + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 + else: + t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs + + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + output_device=args.local_rank, + find_unused_parameters=True) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + global_step = 0 + tr_loss, logging_loss = 0.0, 0.0 + model.zero_grad() + train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) + set_seed(args) # Added here for reproductibility (even between python 2 and 3) + for _ in train_iterator: + epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) + for step, batch in enumerate(epoch_iterator): + model.train() + batch = tuple(t.to(args.device) for t in batch) + inputs = { + 'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': None if args.model_type in ['xlm', 'roberta', 'distilbert'] else batch[2], + 'start_positions': batch[3], + 'end_positions': batch[4], + 'is_impossible': batch[5] if args.model_type in ['xlnet'] else None, + 'cls_index': batch[6] if args.model_type in ['xlnet', 'xlm'] else None, + 'p_mask': batch[7] if args.model_type in ['xlnet', 'xlm'] else None + } + outputs = model(**inputs) + loss = outputs[0] # model outputs are always tuple in transformers (see doc) + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + + tr_loss += loss.item() + if (step + 1) % args.gradient_accumulation_steps == 0: + optimizer.step() + scheduler.step() # Update learning rate schedule + model.zero_grad() + global_step += 1 + + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: + # Log metrics + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + results = evaluate(args, model, tokenizer) + for key, value in results.items(): + tb_writer.add_scalar('eval_{}'.format(key), value, global_step) + tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) + tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) + logging_loss = tr_loss + + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: + # Save model checkpoint + output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + logger.info("Saving model checkpoint to %s", output_dir) + + if args.max_steps > 0 and global_step > args.max_steps: + epoch_iterator.close() + break + if args.max_steps > 0 and global_step > args.max_steps: + train_iterator.close() + break + + if args.local_rank in [-1, 0]: + tb_writer.close() + + return global_step, tr_loss / global_step + + +def evaluate(args, model, tokenizer, prefix=""): + dataset, examples, features = 
load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) + + if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: + os.makedirs(args.output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) + eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval! + logger.info("***** Running evaluation {} *****".format(prefix)) + logger.info(" Num examples = %d", len(dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + all_results = [] + for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + with torch.no_grad(): + inputs = { + 'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': None if args.model_type in ['xlm', 'roberta', 'distilbert'] else batch[2], + 'cls_index': batch[4] if args.model_type in ['xlnet', 'xlm'] else None, + 'p_mask': batch[5] if args.model_type in ['xlnet', 'xlm'] else None + } + example_indices = batch[3] + outputs = model(**inputs) + + for i, example_index in enumerate(example_indices): + eval_feature = features[example_index.item()] + unique_id = int(eval_feature.unique_id) + if args.model_type in ['xlnet', 'xlm']: + # XLNet uses a more complex post-processing procedure + result = RawResultExtended(unique_id = unique_id, + start_top_log_probs = to_list(outputs[0][i]), + start_top_index = to_list(outputs[1][i]), + end_top_log_probs = to_list(outputs[2][i]), + end_top_index = to_list(outputs[3][i]), + cls_logits = to_list(outputs[4][i])) + else: + result = RawResult(unique_id = unique_id, + start_logits = to_list(outputs[0][i]), + end_logits = to_list(outputs[1][i])) + all_results.append(result) + + # Compute predictions + output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) + output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) + if args.version_2_with_negative: + output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) + else: + output_null_log_odds_file = None + + if args.model_type in ['xlnet', 'xlm']: + # XLNet uses a more complex post-processing procedure + write_predictions_extended(examples, features, all_results, args.n_best_size, + args.max_answer_length, output_prediction_file, + output_nbest_file, output_null_log_odds_file, args.predict_file, + model.config.start_n_top, model.config.end_n_top, + args.version_2_with_negative, tokenizer, args.do_lower_case, args.verbose_logging) + elif args.model_type in ['roberta']: + write_predictions_v2(examples, features, all_results, args.n_best_size, + args.max_answer_length, args.do_lower_case, output_prediction_file, + output_nbest_file, output_null_log_odds_file, args.verbose_logging, + args.answer_score_threshold, tokenizer, args.enable_special_answer) + else: + write_predictions(examples, features, all_results, args.n_best_size, + args.max_answer_length, args.do_lower_case, output_prediction_file, + output_nbest_file, output_null_log_odds_file, args.verbose_logging, + args.version_2_with_negative, args.null_score_diff_threshold) + + # Evaluate with the official CoQA script + output_evaluation_file = os.path.join(args.output_dir, "evaluation_{}.metric".format(prefix)) + evaluate_options = EVAL_OPTS(data_file=args.predict_file, + 
pred_file=output_prediction_file, + o=output_evaluation_file) + results = evaluate_on_coqa(evaluate_options) + return results + + +def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): + if args.local_rank not in [-1, 0] and not evaluate: + torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache + + # Load data features from cache or dataset file + input_file = args.predict_file if evaluate else args.train_file + cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( + 'dev' if evaluate else 'train', + list(filter(None, args.model_name_or_path.split('/'))).pop(), + str(args.max_seq_length))) + if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: + logger.info("Loading features from cached file %s", cached_features_file) + features = torch.load(cached_features_file) + else: + logger.info("Creating features from dataset file at %s", input_file) + examples = read_coqa_examples(input_file=input_file, + is_training=not evaluate, + max_answer_length=args.max_answer_length) + features = convert_examples_to_features(examples=examples, + tokenizer=tokenizer, + max_seq_length=args.max_seq_length, + doc_stride=args.doc_stride, + max_query_length=args.max_query_length, + is_training=not evaluate, + cls_token_at_end=bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=tokenizer.cls_token, + sep_token=tokenizer.sep_token, + pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], + cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0, + pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0, + sep_token_extra=bool(args.model_type in ['roberta']), + add_prefix_space=bool(args.model_type in ['roberta']), + enable_special_answer=args.enable_special_answer) + + if args.local_rank in [-1, 0]: + logger.info("Saving features into cached file %s", cached_features_file) + torch.save(features, cached_features_file) + + if args.local_rank == 0 and not evaluate: + torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache + + # Convert to Tensors and build dataset + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) + all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) + all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) + if evaluate: + all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) + dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, + all_example_index, all_cls_index, all_p_mask) + else: + all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) + all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) + all_is_impossible = torch.tensor([0.0] * len(features), dtype=torch.float) + dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, + all_start_positions, all_end_positions, all_is_impossible, + all_cls_index, all_p_mask) + + if output_examples: + return dataset, examples, features + return dataset + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + 
parser.add_argument("--train_file", default=None, type=str, required=True, + help="CoQA json for training. E.g., train-v1.1.json") + parser.add_argument("--predict_file", default=None, type=str, required=True, + help="CoQA json for predictions. E.g., dev-v1.1.json or test-v1.1.json") + parser.add_argument("--model_type", default=None, type=str, required=True, + help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) + parser.add_argument("--model_name_or_path", default=None, type=str, required=True, + help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model checkpoints and predictions will be written.") + + ## Other parameters + parser.add_argument("--config_name", default="", type=str, + help="Pretrained config name or path if not the same as model_name") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Pretrained tokenizer name or path if not the same as model_name") + parser.add_argument("--cache_dir", default="", type=str, + help="Where do you want to store the pre-trained models downloaded from s3") + + parser.add_argument('--version_2_with_negative', action='store_true', + help='If true, the CoQA examples contain some that do not have an answer.') + parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, + help="If null_score - best_non_null is greater than the threshold predict null.") + parser.add_argument('--answer_score_threshold', type=float, default=0.5, + help="If null_score is greater than the threshold predict null.") + parser.add_argument('--enable_special_answer', action='store_true', + help='If true, the CoQA examples will concatenate with special answer tokens.') + + parser.add_argument("--max_seq_length", default=384, type=int, + help="The maximum total input sequence length after WordPiece tokenization. Sequences " + "longer than this will be truncated, and sequences shorter than this will be padded.") + parser.add_argument("--doc_stride", default=128, type=int, + help="When splitting up a long document into chunks, how much stride to take between chunks.") + parser.add_argument("--max_query_length", default=64, type=int, + help="The maximum number of tokens for the question. 
Questions longer than this will "
+                             "be truncated to this length.")
+    parser.add_argument("--do_train", action='store_true',
+                        help="Whether to run training.")
+    parser.add_argument("--do_eval", action='store_true',
+                        help="Whether to run eval on the dev set.")
+    parser.add_argument("--evaluate_during_training", action='store_true',
+                        help="Run evaluation during training at each logging step.")
+    parser.add_argument("--do_lower_case", action='store_true',
+                        help="Set this flag if you are using an uncased model.")
+
+    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
+                        help="Batch size per GPU/CPU for training.")
+    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
+                        help="Batch size per GPU/CPU for evaluation.")
+    parser.add_argument("--learning_rate", default=5e-5, type=float,
+                        help="The initial learning rate for Adam.")
+    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+                        help="Number of update steps to accumulate before performing a backward/update pass.")
+    parser.add_argument("--weight_decay", default=0.0, type=float,
+                        help="Weight decay if we apply some.")
+    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
+                        help="Epsilon for Adam optimizer.")
+    parser.add_argument("--max_grad_norm", default=1.0, type=float,
+                        help="Max gradient norm.")
+    parser.add_argument("--num_train_epochs", default=3.0, type=float,
+                        help="Total number of training epochs to perform.")
+    parser.add_argument("--max_steps", default=-1, type=int,
+                        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
+    parser.add_argument("--warmup_steps", default=0, type=int,
+                        help="Linear warmup over warmup_steps.")
+    parser.add_argument("--n_best_size", default=20, type=int,
+                        help="The total number of n-best predictions to generate in the nbest_predictions.json output file.")
+    parser.add_argument("--max_answer_length", default=30, type=int,
+                        help="The maximum length of an answer that can be generated. This is needed because the start "
+                             "and end predictions are not conditioned on one another.")
+    parser.add_argument("--verbose_logging", action='store_true',
+                        help="If true, all of the warnings related to data processing will be printed. 
" + "A number of warnings are expected for a normal CoQA evaluation.") + + parser.add_argument('--logging_steps', type=int, default=50, + help="Log every X updates steps.") + parser.add_argument('--save_steps', type=int, default=50, + help="Save checkpoint every X updates steps.") + parser.add_argument("--eval_all_checkpoints", action='store_true', + help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") + parser.add_argument("--no_cuda", action='store_true', + help="Whether not to use CUDA when available") + parser.add_argument('--overwrite_output_dir', action='store_true', + help="Overwrite the content of the output directory") + parser.add_argument('--overwrite_cache', action='store_true', + help="Overwrite the cached training and evaluation sets") + parser.add_argument('--seed', type=int, default=42, + help="random seed for initialization") + + parser.add_argument("--local_rank", type=int, default=-1, + help="local_rank for distributed training on gpus") + parser.add_argument('--fp16', action='store_true', + help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." + "See details at https://nvidia.github.io/apex/amp.html") + parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") + args = parser.parse_args() + + if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: + raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + args.device = device + + # Setup logging + logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', + level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) + + # Set seed + set_seed(args) + if args.enable_special_answer: + additional_special_tokens = [ + '', '', + '','', + '','','','','','', + '','','','','', + '','' + ] + else: + additional_special_tokens = ['', ''] + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + args.model_type = args.model_type.lower() + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) + tokenizer_model_name_or_path = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path + tokenizer = tokenizer_class.from_pretrained(tokenizer_model_name_or_path, do_lower_case=args.do_lower_case) + tokenizer.add_special_tokens({ "additional_special_tokens": additional_special_tokens }) + model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + model.resize_token_embeddings(config.vocab_size + len(additional_special_tokens)) + + if args.local_rank == 0: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + model.to(args.device) + + logger.info("Training/evaluation parameters %s", args) + + # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. + # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will + # remove the need for this code, but it is still valid. 
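+    # Models such as XLNet compute their attention with torch.einsum, so registering einsum with amp keeps those ops in half precision under opt level O1.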
+ if args.fp16: + try: + import apex + apex.amp.register_half_function(torch, 'einsum') + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + + # Training + if args.do_train: + train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) + global_step, tr_loss = train(args, train_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + + # Save the trained model and the tokenizer + if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): + # Create output directory if needed + if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: + os.makedirs(args.output_dir) + + logger.info("Saving model checkpoint to %s", args.output_dir) + # Save a trained model, configuration and tokenizer using `save_pretrained()`. + # They can then be reloaded using `from_pretrained()` + model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + model_to_save.save_pretrained(args.output_dir) + tokenizer.save_pretrained(args.output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) + + # Load a trained model and vocabulary that you have fine-tuned + model = model_class.from_pretrained(args.output_dir) + tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) + model.to(args.device) + + + # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory + results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs + + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + # Reload the model + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint) + model.to(args.device) + + # Evaluate + result = evaluate(args, model, tokenizer, prefix=global_step) + + result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) + results.update(result) + + logger.info("Results: {}".format(results)) + + return results + + +if __name__ == "__main__": + main() diff --git a/examples/utils_coqa_at.py b/examples/utils_coqa_at.py new file mode 100644 index 00000000000000..1cf8d9601da5e6 --- /dev/null +++ b/examples/utils_coqa_at.py @@ -0,0 +1,1756 @@ + +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Load CoQA dataset. """ + +from __future__ import absolute_import, division, print_function + +import json +import logging +import math +import string +import collections +from io import open + +from transformers.tokenization_bert import BasicTokenizer + +# Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method) +from utils_coqa_evaluate import find_all_best_thresh_v2, make_qid_to_has_ans, get_raw_scores, CoQAEvaluator + +logger = logging.getLogger(__name__) + + +class CoqaExample(object): + """ + A single training/test example for the CoQA dataset. + For examples without an answer, the start and end position are -1. + """ + + def __init__(self, + qas_id, + question_text, + doc_tokens, + orig_answer_text=None, + start_position=None, + end_position=None, + answer_type=None, + answer_subtype=None, + is_skipped=None): + self.qas_id = qas_id + self.question_text = question_text + self.doc_tokens = doc_tokens + self.orig_answer_text = orig_answer_text + self.start_position = start_position + self.end_position = end_position + self.answer_type = answer_type + self.answer_subtype = answer_subtype + self.is_skipped = is_skipped + + def __str__(self): + return self.__repr__() + + def __repr__(self): + s = "" + s += "qas_id: %s" % (self.qas_id) + s += ", question_text: %s" % (self.question_text) + s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) + if self.orig_answer_text: + s += ", orig_answer_text: %s" % (self.orig_answer_text) + if self.start_position: + s += ", start_position: %d" % (self.start_position) + if self.end_position: + s += ", end_position: %d" % (self.end_position) + if self.answer_type: + s += ", answer_type: %s" % (self.answer_type) + if self.answer_subtype: + s += ", answer_subtype: %s" % (self.answer_subtype) + return s + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + unique_id, + example_index, + doc_span_index, + tokens, + token_to_orig_map, + token_is_max_context, + input_ids, + input_mask, + segment_ids, + cls_index, + p_mask, + paragraph_len, + start_position=None, + end_position=None): + self.unique_id = unique_id + self.example_index = example_index + self.doc_span_index = doc_span_index + self.tokens = tokens + self.token_to_orig_map = token_to_orig_map + self.token_is_max_context = token_is_max_context + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.cls_index = cls_index + self.p_mask = p_mask + self.paragraph_len = paragraph_len + self.start_position = start_position + self.end_position = end_position + + +def read_coqa_examples(input_file, is_training, max_answer_length): + """Read a CoQA json file into a list of CoqaExample.""" + with open(input_file, "r", encoding='utf-8') as reader: + input_data = json.load(reader)["data"] + + def is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + + def whitespace_tokenize(text): + word_spans = [] + char_list = [] + for idx, char in enumerate(text): + if char != ' ': + char_list.append(idx) + continue + + if char_list: + word_start = char_list[0] + word_end = char_list[-1] + word_text = text[word_start:word_end+1] + word_spans.append((word_text, word_start, word_end)) + char_list.clear() + + if char_list: + word_start = char_list[0] + word_end = char_list[-1] + word_text = text[word_start:word_end+1] + word_spans.append((word_text, word_start, word_end)) + + return word_spans + + def 
char_span_to_word_span(char_start, + char_end, + word_spans): + word_idx_list = [] + for word_idx, (_, start, end) in enumerate(word_spans): + if end >= char_start: + if start <= char_end: + word_idx_list.append(word_idx) + else: + break + + if word_idx_list: + word_start = word_idx_list[0] + word_end = word_idx_list[-1] + else: + word_start = -1 + word_end = -1 + + return word_start, word_end + + def search_best_span(context_tokens, + answer_tokens): + best_f1 = 0.0 + best_match = "" + best_start, best_end = -1, -1 + search_index = [idx for idx in range(len(context_tokens)) if context_tokens[idx][0] in answer_tokens] + + stop_word_list = ["with", "at", "in", "on", "and", "or", "to", "by", "of", "for", "the", "a", "an", + "is", "are", "was", "were", "has", "have", "his", "her", "my", "your", "its", "their", + "he", "she", "i", "you", "it", "they", "he was", "he is", "she was", "she is", + "it was", "it is", "they were", "they are"] + + for i in range(len(search_index)): + for j in range(i, len(search_index)): + candidate_tokens = [context_tokens[k][0] for k in range(search_index[i], search_index[j]+1) if context_tokens[k][0]] + common = collections.Counter(candidate_tokens) & collections.Counter(answer_tokens) + num_common = sum(common.values()) + if num_common > 0: + precision = 1.0 * num_common / len(candidate_tokens) + recall = 1.0 * num_common / len(answer_tokens) + f1 = (2 * precision * recall) / (precision + recall) + match = " ".join(candidate_tokens).lower().strip() + start = context_tokens[search_index[i]][1] + end = context_tokens[search_index[j]][2] + + if (f1 != 1.0 and (match in stop_word_list or + (len(candidate_tokens) > len(answer_tokens) and + candidate_tokens[0] in stop_word_list and + candidate_tokens[-1] in stop_word_list))): + f1 = 0.0 + match = "" + start, end = -1, -1 + + if f1 > best_f1: + best_f1 = f1 + best_match = match + best_start = start + best_end = end + + return best_f1, best_start, best_end + + def get_question_text(history, + question): + question_tokens = [''] + question["input_text"].split(' ') + return " ".join(history + [" ".join(question_tokens)]) + + def get_question_history(history, + question, + answer, + num_turn): + question_tokens = [] + question_tokens.extend([''] + question["input_text"].split(' ')) + question_tokens.extend([''] + answer["input_text"].split(' ')) + + question_text = " ".join(question_tokens) + if question_text: + history.append(question_text) + + if num_turn >= 0 and len(history) > num_turn: + history = history[-num_turn:] + + return history + + def find_answer_span(answer_text, + rationale_text, + rationale_start, + rationale_end): + idx = rationale_text.find(answer_text) + answer_start = rationale_start + idx + answer_end = answer_start + len(answer_text) - 1 + + return answer_start, answer_end + + def match_answer_span(answer_text, + rationale_start, + rationale_end, + paragraph_text, + norm_func=None): + if not norm_func: + norm_func = CoQAEvaluator.normalize_answer + + answer_tokens = whitespace_tokenize(answer_text) + answer_norm_tokens = [norm_func(token) for token, _, _ in answer_tokens] + answer_norm_tokens = [norm_token for norm_token in answer_norm_tokens if norm_token] + + if not answer_norm_tokens: + return 0.0, -1, -1 + + paragraph_tokens = whitespace_tokenize(paragraph_text) + + rationale_match_score = 0.0 + rationale_answer_start = -1 + rationale_answer_end = -1 + if not (rationale_start == -1 or rationale_end == -1): + rationale_word_start, rationale_word_end = char_span_to_word_span(rationale_start, 
rationale_end, paragraph_tokens) + rationale_tokens = paragraph_tokens[rationale_word_start:rationale_word_end+1] + rationale_norm_tokens = [(norm_func(token), start, end) for token, start, end in rationale_tokens] + (rationale_match_score, rationale_answer_start, + rationale_answer_end) = search_best_span(rationale_norm_tokens, answer_norm_tokens) + + paragraph_norm_tokens = [(norm_func(token), start, end) for token, start, end in paragraph_tokens] + (paragraph_match_score, paragraph_answer_start, + paragraph_answer_end) = search_best_span(paragraph_norm_tokens, answer_norm_tokens) + + if rationale_match_score >= paragraph_match_score: + match_score = rationale_match_score + answer_start = rationale_answer_start + answer_end = rationale_answer_end + else: + match_score = paragraph_match_score + answer_start = paragraph_answer_start + answer_end = paragraph_answer_end + + if match_score > 0.0: + return match_score, answer_start, answer_end + + return 0.0, -1, -1 + + def get_answer_span(answer, + paragraph_text): + input_text = answer["input_text"].strip().lower() + span_start, span_end = answer["span_start"], answer["span_end"] + if span_start == -1 or span_end == -1: + span_text = "" + else: + span_text = paragraph_text[span_start:span_end].lower() + + if input_text in span_text: + span_start, span_end = find_answer_span(input_text, span_text, span_start, span_end) + match_score = 1.0 + else: + match_score_1, span_start_1, span_end_1 = match_answer_span(input_text, span_start, span_end, paragraph_text.lower()) + match_score_2, span_start_2, span_end_2 = match_answer_span(input_text, + span_start, span_end, paragraph_text.lower(), normalize_token) + if match_score_2 > match_score_1: + span_start, span_end = span_start_2, span_end_2 + match_score = match_score_2 + else: + span_start, span_end = span_start_1, span_end_1 + match_score = match_score_1 + + if span_start == -1 or span_end == -1: + answer_text = "" + is_skipped = True + else: + answer_text = paragraph_text[span_start:span_end+1] + is_skipped = False + + return answer_text, span_start, span_end, match_score, is_skipped + + def normalize_answer(answer): + norm_answer = CoQAEvaluator.normalize_answer(answer) + + if norm_answer in ["yes", "yese", "ye", "es", "definitely"]: + return "yes" + + if norm_answer in ["no", "no not at all", "not", "not at all", "not yet", "not really"]: + return "no" + + number_lookup = { + "zero": "none", "0": "none", "1": "one", "2": "two", "3": "three", "4": "four", + "5": "five", "6": "six", "7": "seven", "8": "eight", "9": "nine", "10": "ten" + } + + if norm_answer in number_lookup: + return number_lookup[norm_answer] + + if norm_answer.startswith("at least") and norm_answer[8:].strip().lower() in number_lookup: + norm_answer = norm_answer[8:].strip().lower() + return number_lookup[norm_answer] + + return norm_answer + + def normalize_token(token): + if token.endswith("'s"): + token = token[:-2] + + if (token.endswith("st") or token.endswith("nd") or token.endswith("rd") or token.endswith("th")) and token[:-2].isnumeric(): + token = token[:-2] + + if "-" in token: + token = token.split("-")[0] + + if "," in token and not token.split(",")[0].isnumeric(): + token = token.split(",")[0] + + norm_token = CoQAEvaluator.normalize_answer(token) + + return norm_token + + def get_answer_type(question, + answer): + norm_answer = normalize_answer(answer["input_text"]) + + if norm_answer == "unknown" or "bad_turn" in answer: + return "unknown", None + + if norm_answer == "yes": + return "yes", None + + if 
norm_answer == "no": + return "no", None + + if norm_answer in ["none", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten"]: + return "number", norm_answer + + norm_question_tokens = CoQAEvaluator.normalize_answer(question["input_text"]).split(" ") + if "or" in norm_question_tokens: + index = norm_question_tokens.index("or") + if index-1 >= 0 and index+1 < len(norm_question_tokens): + if norm_answer == norm_question_tokens[index-1]: + norm_answer = "option_a" + elif norm_answer == norm_question_tokens[index+1]: + norm_answer = "option_b" + + if norm_answer in ["option_a", "option_b"]: + return "option", norm_answer + + return "span", None + + def process_found_answer(raw_answer, + found_answer): + raw_answer_tokens = raw_answer.split(' ') + found_answer_tokens = found_answer.split(' ') + + raw_answer_last_token = raw_answer_tokens[-1].lower() + found_answer_last_token = found_answer_tokens[-1].lower() + + if (raw_answer_last_token != found_answer_last_token and + raw_answer_last_token == found_answer_last_token.rstrip(string.punctuation)): + found_answer_tokens[-1] = found_answer_tokens[-1].rstrip(string.punctuation) + + return ' '.join(found_answer_tokens) + + def generate_final_answer(question_text, + answer_text, + answer_type, + answer_subtype): + if answer_type == "span" and answer_text: + final_answer_text = answer_text + elif answer_type == "unknown": + final_answer_text = "unknown" + elif answer_type in ["yes", "no"]: + final_answer_text = answer_type + elif answer_type == "number": + final_answer_text = answer_subtype + elif answer_type == "option": + norm_question_tokens = CoQAEvaluator.normalize_answer(question_text).split(" ") + if "or" in norm_question_tokens: + index = norm_question_tokens.index("or") + if index-1 >= 0 and index+1 < len(norm_question_tokens): + if answer_subtype == "option_a": + final_answer_text = norm_question_tokens[index-1] + if answer_subtype == "option_b": + final_answer_text = norm_question_tokens[index+1] + else: + final_answer_text = "" + + return final_answer_text + + examples = [] + for data in input_data: + data_id = data["id"] + paragraph_text = data["story"] + + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + for c in paragraph_text: + if is_whitespace(c): + prev_is_whitespace = True + else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + questions = sorted(data["questions"], key=lambda x: x["turn_id"]) + answers = sorted(data["answers"], key=lambda x: x["turn_id"]) + + question_history = [] + qas = list(zip(questions, answers)) + for i, (question, answer) in enumerate(qas): + qas_id = "{0}_{1}".format(data_id, i+1) + + answer_text, span_start, span_end, match_score, is_skipped = get_answer_span(answer, paragraph_text) + answer_type, answer_subtype = get_answer_type(question, answer) + + question_text = get_question_text(question_history, question) + question_history = get_question_history(question_history, question, answer, -1) + + orig_answer_text = "" + start_position = -1 + end_position = -1 + if answer_type == "span": + if not is_skipped: + orig_answer_text = process_found_answer(answer["input_text"], answer_text) + start_position = char_to_word_offset[span_start] + end_position = char_to_word_offset[span_start + len(orig_answer_text) - 1] + elif answer["span_text"]: + span_tokens = whitespace_tokenize(answer["span_text"]) + if len(span_tokens) <= max_answer_length: + 
orig_answer_text = answer["span_text"] + start_position = char_to_word_offset[answer["span_start"]] + end_position = char_to_word_offset[answer["span_start"] + len(orig_answer_text) - 1] + is_skipped = False + + final_answer_text = generate_final_answer(question["input_text"], orig_answer_text, answer_type, answer_subtype) + if answer_type != "span": + if match_score == 1.0 and CoQAEvaluator.normalize_answer(answer_text) != final_answer_text: + orig_answer_text = process_found_answer(answer["input_text"], answer_text) + start_position = char_to_word_offset[span_start] + end_position = char_to_word_offset[span_start + len(orig_answer_text) - 1] + answer_type, answer_subtype = "span", None + + is_skipped = False + + example = CoqaExample( + qas_id=qas_id, + question_text=question_text, + doc_tokens=doc_tokens, + orig_answer_text=orig_answer_text, + start_position=start_position, + end_position=end_position, + answer_type=answer_type, + answer_subtype=answer_subtype, + is_skipped=is_skipped) + + examples.append(example) + + example_list = [] + for example in examples: + id_items = example.qas_id.split('_') + id = id_items[0] + turn_id = int(id_items[1]) + + question_text = example.question_text.split("")[-1] + answer = generate_final_answer(question_text, example.orig_answer_text, example.answer_type, example.answer_subtype) + + example_list.append({ + "id": id, + "turn_id": turn_id, + "answer": answer + }) + + example_file = r"data/coqa/v1.0/dev-v1.0.search.json" + with open(example_file, "w") as file: + json.dump(example_list, file, indent=4) + + return examples + + +def convert_examples_to_features(examples, tokenizer, max_seq_length, + doc_stride, max_query_length, is_training, + cls_token_at_end=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=0, pad_token_segment_id=0, + mask_padding_with_zero=True, sep_token_extra=False, add_prefix_space=False, enable_special_answer=False): + """Loads a data file into a list of `InputBatch`s.""" + + unique_id = 1000000000 + # cnt_pos, cnt_neg = 0, 0 + # max_N, max_M = 1024, 1024 + # f = np.zeros((max_N, max_M), dtype=np.float32) + special_answer_list = [ + '', '', + '', '', '', '', '', '', + '', '', '', '', '', + '', '' + ] + + features = [] + for (example_index, example) in enumerate(examples): + + # if example_index % 100 == 0: + # logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg) + + query_tokens = [] + qa_texts = example.question_text.split('') + for qa_text in qa_texts: + qa_text = qa_text.strip() + if not qa_text: + continue + + query_tokens.append('') + + qa_items = qa_text.split('') + if len(qa_items) < 1: + continue + + q_text = qa_items[0].strip() + if add_prefix_space: + q_tokens = tokenizer.tokenize(q_text, add_prefix_space=True) + else: + q_tokens = tokenizer.tokenize(q_text) + + query_tokens.extend(q_tokens) + + if len(qa_items) < 2: + continue + + query_tokens.append('') + + a_text = qa_items[1].strip() + if add_prefix_space: + a_tokens = tokenizer.tokenize(a_text, add_prefix_space=True) + else: + a_tokens = tokenizer.tokenize(a_text) + + query_tokens.extend(a_tokens) + + if len(query_tokens) > max_query_length: + query_tokens = query_tokens[-max_query_length:] + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for (i, token) in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + + if add_prefix_space: + sub_tokens = tokenizer.tokenize(token, 
add_prefix_space=True) + else: + sub_tokens = tokenizer.tokenize(token) + + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + tok_start_position = None + tok_end_position = None + if example.answer_type == "span" and not example.is_skipped and example.orig_answer_text: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, + example.orig_answer_text, add_prefix_space) + else: + tok_start_position = -1 + tok_end_position = -1 + + # The -3 accounts for [CLS], [SEP] and [SEP] + special_tokens_count = 4 if sep_token_extra else 3 + if enable_special_answer: + max_tokens_for_doc = max_seq_length - len(query_tokens) - special_tokens_count - len(special_answer_list) + else: + max_tokens_for_doc = max_seq_length - len(query_tokens) - special_tokens_count + + # We can have documents that are longer than the maximum sequence length. + # To deal with this we do a sliding window approach, where we take chunks + # of the up to our max length with a stride of `doc_stride`. + _DocSpan = collections.namedtuple( # pylint: disable=invalid-name + "DocSpan", ["start", "length"]) + doc_spans = [] + start_offset = 0 + while start_offset < len(all_doc_tokens): + length = len(all_doc_tokens) - start_offset + if length > max_tokens_for_doc: + length = max_tokens_for_doc + doc_spans.append(_DocSpan(start=start_offset, length=length)) + if start_offset + length == len(all_doc_tokens): + break + start_offset += min(length, doc_stride) + + for (doc_span_index, doc_span) in enumerate(doc_spans): + tokens = [] + token_to_orig_map = {} + token_is_max_context = {} + segment_ids = [] + + # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) + # Original TF implem also keep the classification token (set to 0) (not sure why...) 
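+            # p_mask is consumed by the XLNet/XLM answer heads: positions marked 1 (query, SEP and padding tokens) are excluded from span prediction.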
+ p_mask = [] + special_answer_offset = 0 + + if cls_token_at_end: + if enable_special_answer: + special_answer_offset = len(tokens) + for special_answer in special_answer_list: + tokens.append(special_answer) + segment_ids.append(cls_token_segment_id) + p_mask.append(0) + + # Paragraph + for i in range(doc_span.length): + split_token_index = doc_span.start + i + token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] + is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) + token_is_max_context[len(tokens)] = is_max_context + tokens.append(all_doc_tokens[split_token_index]) + segment_ids.append(sequence_a_segment_id) + p_mask.append(0) + paragraph_len = doc_span.length + + # SEP token + tokens.append(sep_token) + segment_ids.append(sequence_a_segment_id) + p_mask.append(1) + + if sep_token_extra: + tokens.append(sep_token) + segment_ids.append(sequence_a_segment_id) + p_mask.append(1) + + # Query + for token in query_tokens: + tokens.append(token) + segment_ids.append(sequence_b_segment_id) + p_mask.append(1) + + # SEP token + tokens.append(sep_token) + segment_ids.append(sequence_b_segment_id) + p_mask.append(1) + + # CLS token at the end + tokens.append(cls_token) + segment_ids.append(cls_token_segment_id) + p_mask.append(0) + cls_index = len(tokens) - 1 # Index of classification token + else: + # CLS token at the beginning + tokens.append(cls_token) + segment_ids.append(cls_token_segment_id) + p_mask.append(0) + cls_index = 0 + + if enable_special_answer: + special_answer_offset = len(tokens) + for special_answer in special_answer_list: + tokens.append(special_answer) + segment_ids.append(cls_token_segment_id) + p_mask.append(0) + + # Query + for token in query_tokens: + tokens.append(token) + segment_ids.append(sequence_a_segment_id) + p_mask.append(1) + + # SEP token + tokens.append(sep_token) + segment_ids.append(sequence_a_segment_id) + p_mask.append(1) + + if sep_token_extra: + tokens.append(sep_token) + segment_ids.append(sequence_a_segment_id) + p_mask.append(1) + + # Paragraph + for i in range(doc_span.length): + split_token_index = doc_span.start + i + token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] + is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) + token_is_max_context[len(tokens)] = is_max_context + tokens.append(all_doc_tokens[split_token_index]) + segment_ids.append(sequence_b_segment_id) + p_mask.append(0) + paragraph_len = doc_span.length + + # SEP token + tokens.append(sep_token) + segment_ids.append(sequence_b_segment_id) + p_mask.append(1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. + while len(input_ids) < max_seq_length: + input_ids.append(pad_token) + input_mask.append(0 if mask_padding_with_zero else 1) + segment_ids.append(pad_token_segment_id) + p_mask.append(1) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + start_position = None + end_position = None + if example.answer_type == "span" and not example.is_skipped and example.orig_answer_text: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. 
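+                # In this implementation the chunk is actually kept: if the annotated span falls outside the window, the target is mapped to cls_index below, so the window is trained as "no answer".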
+ doc_start = doc_span.start + doc_end = doc_span.start + doc_span.length - 1 + if tok_start_position >= doc_start and tok_end_position <= doc_end: + doc_offset = 0 if cls_token_at_end else len(query_tokens) + special_tokens_count - 1 + doc_offset = doc_offset + len(special_answer_list) if enable_special_answer else doc_offset + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + else: + start_position = cls_index + end_position = cls_index + elif enable_special_answer: + if example.answer_type == "yes": + start_position = special_answer_offset + end_position = special_answer_offset + elif example.answer_type == "no": + start_position = special_answer_offset + 1 + end_position = special_answer_offset + 1 + elif example.answer_type == "number" and example.answer_subtype == "none": + start_position = special_answer_offset + 2 + end_position = special_answer_offset + 2 + elif example.answer_type == "number" and example.answer_subtype == "one": + start_position = special_answer_offset + 3 + end_position = special_answer_offset + 3 + elif example.answer_type == "number" and example.answer_subtype == "two": + start_position = special_answer_offset + 4 + end_position = special_answer_offset + 4 + elif example.answer_type == "number" and example.answer_subtype == "three": + start_position = special_answer_offset + 5 + end_position = special_answer_offset + 5 + elif example.answer_type == "number" and example.answer_subtype == "four": + start_position = special_answer_offset + 6 + end_position = special_answer_offset + 6 + elif example.answer_type == "number" and example.answer_subtype == "five": + start_position = special_answer_offset + 7 + end_position = special_answer_offset + 7 + elif example.answer_type == "number" and example.answer_subtype == "six": + start_position = special_answer_offset + 8 + end_position = special_answer_offset + 8 + elif example.answer_type == "number" and example.answer_subtype == "seven": + start_position = special_answer_offset + 9 + end_position = special_answer_offset + 9 + elif example.answer_type == "number" and example.answer_subtype == "eight": + start_position = special_answer_offset + 10 + end_position = special_answer_offset + 10 + elif example.answer_type == "number" and example.answer_subtype == "nine": + start_position = special_answer_offset + 11 + end_position = special_answer_offset + 11 + elif example.answer_type == "number" and example.answer_subtype == "ten": + start_position = special_answer_offset + 12 + end_position = special_answer_offset + 12 + elif example.answer_type == "option" and example.answer_subtype == "option_a": + start_position = special_answer_offset + 13 + end_position = special_answer_offset + 13 + elif example.answer_type == "option" and example.answer_subtype == "option_b": + start_position = special_answer_offset + 14 + end_position = special_answer_offset + 14 + else: + start_position = cls_index + end_position = cls_index + else: + start_position = cls_index + end_position = cls_index + + if example_index < 20: + logger.info("*** Example ***") + logger.info("unique_id: %s" % (unique_id)) + logger.info("example_index: %s" % (example_index)) + logger.info("doc_span_index: %s" % (doc_span_index)) + logger.info("tokens: %s" % " ".join(tokens)) + logger.info("token_to_orig_map: %s" % " ".join([ + "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])) + logger.info("token_is_max_context: %s" % " ".join([ + "%d:%s" % (x, y) for (x, y) in 
token_is_max_context.items() + ])) + logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + logger.info( + "input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info( + "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + + if example.answer_type == "span" and not example.is_skipped and example.orig_answer_text: + answer_text = " ".join(tokens[start_position:(end_position + 1)]) + logger.info("start_position: %d" % (start_position)) + logger.info("end_position: %d" % (end_position)) + logger.info("answer_text: %s" % (answer_text)) + logger.info("answer_type: %s" % (example.answer_type)) + logger.info("answer_subtype: %s" % (example.answer_subtype)) + else: + logger.info("answer_type: %s" % (example.answer_type)) + logger.info("answer_subtype: %s" % (example.answer_subtype)) + + + features.append( + InputFeatures( + unique_id=unique_id, + example_index=example_index, + doc_span_index=doc_span_index, + tokens=tokens, + token_to_orig_map=token_to_orig_map, + token_is_max_context=token_is_max_context, + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + cls_index=cls_index, + p_mask=p_mask, + paragraph_len=paragraph_len, + start_position=start_position, + end_position=end_position)) + unique_id += 1 + + return features + + +def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, + orig_answer_text, add_prefix_space=False): + """Returns tokenized answer spans that better match the annotated answer.""" + + # The CoQA annotations are character based. We first project them to + # whitespace-tokenized words. But then after WordPiece tokenization, we can + # often find a "better match". For example: + # + # Question: What year was John Smith born? + # Context: The leader was John Smith (1895-1943). + # Answer: 1895 + # + # The original whitespace-tokenized answer will be "(1895-1943).". However + # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match + # the exact answer, 1895. + # + # However, this is not always possible. Consider the following: + # + # Question: What country is the top exporter of electornics? + # Context: The Japanese electronics industry is the lagest in the world. + # Answer: Japan + # + # In this case, the annotator chose "Japan" as a character sub-span of + # the word "Japanese". Since our WordPiece tokenizer does not split + # "Japanese", we just use "Japanese" as the annotation. This is fairly rare + # in CoQA, but does happen. + if add_prefix_space: + tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text, add_prefix_space=True)) + else: + tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) + + for new_start in range(input_start, input_end + 1): + for new_end in range(input_end, new_start - 1, -1): + text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) + if text_span == tok_answer_text: + return (new_start, new_end) + + return (input_start, input_end) + + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + + # Because of the sliding window approach taken to scoring documents, a single + # token can appear in multiple documents. E.g. + # Doc: the man went to the store and bought a gallon of milk + # Span A: the man went to the + # Span B: to the store and bought + # Span C: and bought a gallon of + # ... + # + # Now the word 'bought' will have two scores from spans B and C. 
We only + # want to consider the score with "maximum context", which we define as + # the *minimum* of its left and right context (the *sum* of left and + # right context will always be the same, of course). + # + # In the example the maximum context for 'bought' would be span C since + # it has 1 left context and 3 right context, while span B has 4 left context + # and 0 right context. + best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +RawResult = collections.namedtuple("RawResult", + ["unique_id", "start_logits", "end_logits"]) + +def write_predictions(all_examples, all_features, all_results, n_best_size, + max_answer_length, do_lower_case, output_prediction_file, + output_nbest_file, output_null_log_odds_file, verbose_logging, + version_2_with_negative, null_score_diff_threshold, tokenizer=None): + """Write final predictions to the json file and log-odds of null if needed.""" + logger.info("Writing predictions to: %s" % (output_prediction_file)) + logger.info("Writing nbest to: %s" % (output_nbest_file)) + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + min_null_feature_index = 0 # the paragraph slice with min null score + null_start_logit = 0 # the start logit at the slice with min null score + null_end_logit = 0 # the end logit at the slice with min null score + for (feature_index, feature) in enumerate(features): + result = unique_id_to_result[feature.unique_id] + start_indexes = _get_best_indexes(result.start_logits, n_best_size) + end_indexes = _get_best_indexes(result.end_logits, n_best_size) + # if we could have irrelevant answers, get the min score of irrelevant + if version_2_with_negative: + feature_null_score = result.start_logits[0] + result.end_logits[0] + if feature_null_score < score_null: + score_null = feature_null_score + min_null_feature_index = feature_index + null_start_logit = result.start_logits[0] + null_end_logit = result.end_logits[0] + for start_index in start_indexes: + for end_index in end_indexes: + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. 
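+                    # Concretely, the checks below discard candidates whose start or
+                    # end index falls outside the sequence, indices that do not map
+                    # back to the original document (i.e. question or special tokens),
+                    # start tokens that are not in their "max context" span, reversed
+                    # spans, and spans longer than max_answer_length.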
+ if start_index >= len(feature.tokens): + continue + if end_index >= len(feature.tokens): + continue + if start_index not in feature.token_to_orig_map: + continue + if end_index not in feature.token_to_orig_map: + continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index])) + if version_2_with_negative: + prelim_predictions.append( + _PrelimPrediction( + feature_index=min_null_feature_index, + start_index=0, + end_index=0, + start_logit=null_start_logit, + end_logit=null_end_logit)) + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_logit + x.end_logit), + reverse=True) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_logit", "end_logit"]) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + if pred.start_index > 0: # this is a non-null prediction + tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] + + if tokenizer is not None: + tok_text = tokenizer.convert_tokens_to_string(tok_tokens) + else: + tok_text = " ".join(tok_tokens) + + # De-tokenize WordPieces that have been split off. + tok_text = tok_text.replace(" ##", "") + tok_text = tok_text.replace("##", "") + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + else: + final_text = "" + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_logit=pred.start_logit, + end_logit=pred.end_logit)) + # if we didn't include the empty option in the n-best, include it + if version_2_with_negative: + if "" not in seen_predictions: + nbest.append( + _NbestPrediction( + text="", + start_logit=null_start_logit, + end_logit=null_end_logit)) + + # In very rare edge cases we could only have single null prediction. + # So we just create a nonce prediction in this case to avoid failure. + if len(nbest)==1: + nbest.insert(0, + _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. 
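+        # (Downstream code asserts len(nbest) >= 1 and reads nbest_json[0],
+        # so an empty n-best list would otherwise break prediction writing.)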
+ if not nbest: + nbest.append( + _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + assert len(nbest) >= 1 + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + if not best_non_null_entry: + if entry.text: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + assert len(nbest_json) >= 1 + + if not version_2_with_negative: + all_predictions[example.qas_id] = nbest_json[0]["text"] + else: + # predict "" iff the null score - the score of best non-null > threshold + score_diff = score_null - best_non_null_entry.start_logit - ( + best_non_null_entry.end_logit) + scores_diff_json[example.qas_id] = score_diff + if score_diff > null_score_diff_threshold: + all_predictions[example.qas_id] = "" + else: + all_predictions[example.qas_id] = best_non_null_entry.text + all_nbest_json[example.qas_id] = nbest_json + + with open(output_prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + + with open(output_nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + + if version_2_with_negative: + with open(output_null_log_odds_file, "w") as writer: + writer.write(json.dumps(scores_diff_json, indent=4) + "\n") + + return all_predictions + +def write_predictions_v2(all_examples, all_features, all_results, n_best_size, + max_answer_length, do_lower_case, output_prediction_file, + output_nbest_file, output_null_log_odds_file, verbose_logging, + answer_score_threshold=None, tokenizer=None, enable_special_answer=None): + """Write final predictions to the json file and log-odds of null if needed.""" + logger.info("Writing predictions to: %s" % (output_prediction_file)) + logger.info("Writing nbest to: %s" % (output_nbest_file)) + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "answer_type", "start_index", "end_index", "start_logit", "end_logit"]) + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + + if enable_special_answer: + special_answer_list = [ + 'yes', 'no', + 'none', 'one', 'two', 'three', 'four', 'five', + 'six', 'seven', 'eight', 'nine', 'ten', + 'option_a', 'option_b' + ] + else: + special_answer_list = [] + + for (example_index, example) in enumerate(all_examples): + # go through each example + prelim_predictions = [] + unknown_predictions = [] + special_predictions = { special_answer: [] for special_answer in special_answer_list } + + features = example_index_to_features[example_index] + for (feature_index, feature) in enumerate(features): + # go through each feature of an example + result = unique_id_to_result[feature.unique_id] + + # for span answers + start_indexes = _get_best_indexes(result.start_logits, n_best_size) + end_indexes = _get_best_indexes(result.end_logits, n_best_size) + for start_index in start_indexes: + for end_index in end_indexes: + # get answer 
type by start/end index + answer_type = get_answer_type(start_index, end_index, len(feature.tokens), special_answer_list) + + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. + if answer_type != "span": + continue + if start_index >= len(feature.tokens): + continue + if end_index >= len(feature.tokens): + continue + if start_index not in feature.token_to_orig_map: + continue + if end_index not in feature.token_to_orig_map: + continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + if end_index - start_index + 1 > max_answer_length: + continue + + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + answer_type=answer_type, + start_index=start_index, + end_index=end_index, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index])) + + # for unknown answers + unknown_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + answer_type="unknown", + start_index=0, + end_index=0, + start_logit=result.start_logits[0], + end_logit=result.end_logits[0])) + + # for special answers + for i, special_answer in enumerate(special_answer_list): + idx = i + 1 + special_predictions[special_answer].append( + _PrelimPrediction( + feature_index=feature_index, + answer_type=special_answer_list[i], + start_index=idx, + end_index=idx, + start_logit=result.start_logits[idx], + end_logit=result.end_logits[idx])) + + unknown_predictions = sorted(unknown_predictions, key=lambda x: (x.start_logit + x.end_logit)) + special_predictions = { + special_answer: sorted(special_predictions[special_answer], key=lambda x: (x.start_logit + x.end_logit), reverse=True) + for special_answer in special_answer_list + } + + if len(unknown_predictions) > 0: + prelim_predictions.append(unknown_predictions[0]) + + for special_answer in special_answer_list: + if len(special_predictions[special_answer]) > 0: + prelim_predictions.append(special_predictions[special_answer][0]) + + prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "answer_type", "start_logit", "end_logit"]) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + + feature = features[pred.feature_index] + + if pred.answer_type == "span": # this is a non-null prediction + tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] + + if tokenizer is not None: + tok_text = tokenizer.convert_tokens_to_string(tok_tokens) + else: + tok_text = " ".join(tok_tokens) + + # De-tokenize WordPieces that have been split off. 
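+                # e.g. "play ##ing" -> "playing"; when a tokenizer was passed,
+                # convert_tokens_to_string above has typically already joined the
+                # pieces and these replaces are a no-op.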
+ tok_text = tok_text.replace(" ##", "") + tok_text = tok_text.replace("##", "") + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) + + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + else: + final_text = pred.answer_type + + if final_text in ["option_a", "option_b"]: + question_text = example.question_text.split("")[-1] + norm_question_tokens = CoQAEvaluator.normalize_answer(question_text).split(" ") + if "or" in norm_question_tokens: + index = norm_question_tokens.index("or") + if index-1 >= 0 and index+1 < len(norm_question_tokens): + if final_text == "option_a": + final_text = norm_question_tokens[index-1] + if final_text == "option_b": + final_text = norm_question_tokens[index+1] + + nbest.append( + _NbestPrediction( + text=final_text, + answer_type=pred.answer_type, + start_logit=pred.start_logit, + end_logit=pred.end_logit)) + + assert len(nbest) > 0 + + total_scores = [] + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["answer_type"] = entry.answer_type + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + assert len(nbest_json) > 0 + + if answer_score_threshold: + for data in nbest_json: + if data["answer_type"] != "span" and data["probability"] > answer_score_threshold: + all_predictions[example.qas_id] = data["text"] + break + elif data["answer_type"] == "span": + all_predictions[example.qas_id] = data["text"] + break + else: + all_predictions[example.qas_id] = nbest_json[0]["text"] + + all_nbest_json[example.qas_id] = nbest_json + + output_data = [] + for qas_id in all_predictions.keys(): + id_items = qas_id.split('_') + id = id_items[0] + turn_id = int(id_items[1]) + + answer = all_predictions[qas_id] + + output_data.append({ + "id": id, + "turn_id": turn_id, + "answer": answer + }) + + with open(output_prediction_file, "w") as file: + json.dump(output_data, file, indent=4) + + with open(output_nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + + return all_predictions + +# For XLNet (and XLM which uses the same head) +RawResultExtended = collections.namedtuple("RawResultExtended", + ["unique_id", "start_top_log_probs", "start_top_index", + "end_top_log_probs", "end_top_index", "cls_logits"]) + + +def write_predictions_extended(all_examples, all_features, all_results, n_best_size, + max_answer_length, output_prediction_file, + output_nbest_file, + output_null_log_odds_file, orig_data_file, + start_n_top, end_n_top, version_2_with_negative, + tokenizer, do_lower_case, verbose_logging): + """ XLNet write prediction logic (more complex than Bert's). + Write final predictions to the json file and log-odds of null if needed. 
+ + Requires utils_coqa_evaluate.py + """ + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "start_index", "end_index", + "start_log_prob", "end_log_prob"]) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) + + logger.info("Writing predictions to: %s", output_prediction_file) + # logger.info("Writing nbest to: %s" % (output_nbest_file)) + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + + for (feature_index, feature) in enumerate(features): + result = unique_id_to_result[feature.unique_id] + + cur_null_score = result.cls_logits + + # if we could have irrelevant answers, get the min score of irrelevant + score_null = min(score_null, cur_null_score) + + for i in range(start_n_top): + for j in range(end_n_top): + start_log_prob = result.start_top_log_probs[i] + start_index = result.start_top_index[i] + + j_index = i * end_n_top + j + + end_log_prob = result.end_top_log_probs[j_index] + end_index = result.end_top_index[j_index] + + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. + if start_index >= feature.paragraph_len - 1: + continue + if end_index >= feature.paragraph_len - 1: + continue + + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_log_prob=start_log_prob, + end_log_prob=end_log_prob)) + + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_log_prob + x.end_log_prob), + reverse=True) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + + # XLNet un-tokenizer + # Let's keep it simple for now and see if we need all this later. 
+ # + # tok_start_to_orig_index = feature.tok_start_to_orig_index + # tok_end_to_orig_index = feature.tok_end_to_orig_index + # start_orig_pos = tok_start_to_orig_index[pred.start_index] + # end_orig_pos = tok_end_to_orig_index[pred.end_index] + # paragraph_text = example.paragraph_text + # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() + + # Previously used Bert untokenizer + tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] + tok_text = tokenizer.convert_tokens_to_string(tok_tokens) + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) + + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_log_prob=pred.start_log_prob, + end_log_prob=pred.end_log_prob)) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. + if not nbest: + nbest.append( + _NbestPrediction(text="", start_log_prob=-1e6, + end_log_prob=-1e6)) + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_log_prob + entry.end_log_prob) + if not best_non_null_entry: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_log_prob"] = entry.start_log_prob + output["end_log_prob"] = entry.end_log_prob + nbest_json.append(output) + + assert len(nbest_json) >= 1 + assert best_non_null_entry is not None + + score_diff = score_null + scores_diff_json[example.qas_id] = score_diff + # note(zhiliny): always predict best_non_null_entry + # and the evaluation script will search for the best threshold + all_predictions[example.qas_id] = best_non_null_entry.text + + all_nbest_json[example.qas_id] = nbest_json + + with open(output_prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + + with open(output_nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + + if version_2_with_negative: + with open(output_null_log_odds_file, "w") as writer: + writer.write(json.dumps(scores_diff_json, indent=4) + "\n") + + with open(orig_data_file, "r", encoding='utf-8') as reader: + orig_data = json.load(reader)["data"] + + qid_to_has_ans = make_qid_to_has_ans(orig_data) + has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] + no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] + exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions) + out_eval = {} + + find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans) + + return out_eval + +def get_answer_type(start_index, end_index, max_seq_length, special_answer_list=None): + if special_answer_list and len(special_answer_list) > 0: + special_answer_offset = len(special_answer_list) + if start_index > special_answer_offset and end_index > special_answer_offset: + return "span" + else: + if start_index == end_index: + if start_index == 0: + return "unknown" + if 
start_index == 1: + return "yes" + if start_index == 2: + return "no" + if start_index == 3: + return "none" + if start_index == 4: + return "one" + if start_index == 5: + return "two" + if start_index == 6: + return "three" + if start_index == 7: + return "four" + if start_index == 8: + return "five" + if start_index == 9: + return "six" + if start_index == 10: + return "seven" + if start_index == 11: + return "eight" + if start_index == 12: + return "nine" + if start_index == 13: + return "ten" + if start_index == 14: + return "option_a" + if start_index == 15: + return "option_b" + else: + return "unknown" + else: + if start_index > 0 and end_index > 0: + return "span" + else: + return "unknown" + + +def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): + """Project the tokenized prediction back to the original text.""" + + # When we created the data, we kept track of the alignment between original + # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So + # now `orig_text` contains the span of our original text corresponding to the + # span that we predicted. + # + # However, `orig_text` may contain extra characters that we don't want in + # our prediction. + # + # For example, let's say: + # pred_text = steve smith + # orig_text = Steve Smith's + # + # We don't want to return `orig_text` because it contains the extra "'s". + # + # We don't want to return `pred_text` because it's already been normalized + # (the CoQA eval script also does punctuation stripping/lower casing but + # our tokenizer does additional normalization like stripping accent + # characters). + # + # What we really want to return is "Steve Smith". + # + # Therefore, we have to apply a semi-complicated alignment heuristic between + # `pred_text` and `orig_text` to get a character-to-character alignment. This + # can fail in certain cases in which case we just return `orig_text`. + + def _strip_spaces(text): + ns_chars = [] + ns_to_s_map = collections.OrderedDict() + for (i, c) in enumerate(text): + if c == " ": + continue + ns_to_s_map[len(ns_chars)] = i + ns_chars.append(c) + ns_text = "".join(ns_chars) + return (ns_text, ns_to_s_map) + + # We first tokenize `orig_text`, strip whitespace from the result + # and `pred_text`, and check if they are the same length. If they are + # NOT the same length, the heuristic has failed. If they are the same + # length, we assume the characters are one-to-one aligned. + tokenizer = BasicTokenizer(do_lower_case=do_lower_case) + + tok_text = " ".join(tokenizer.tokenize(orig_text)) + + start_position = tok_text.find(pred_text) + if start_position == -1: + if verbose_logging: + logger.info( + "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) + return orig_text + end_position = start_position + len(pred_text) - 1 + + (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) + (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) + + if len(orig_ns_text) != len(tok_ns_text): + if verbose_logging: + logger.info("Length not equal after stripping spaces: '%s' vs '%s'", + orig_ns_text, tok_ns_text) + return orig_text + + # We then project the characters in `pred_text` back to `orig_text` using + # the character-to-character alignment. 
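+    # Walking pred_text's start/end offsets through tok_s_to_ns_map and then
+    # orig_ns_to_s_map below yields the matching character span of orig_text,
+    # e.g. "Steve Smith" out of "Steve Smith's" in the example above.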
+ tok_s_to_ns_map = {} + for (i, tok_index) in tok_ns_to_s_map.items(): + tok_s_to_ns_map[tok_index] = i + + orig_start_position = None + if start_position in tok_s_to_ns_map: + ns_start_position = tok_s_to_ns_map[start_position] + if ns_start_position in orig_ns_to_s_map: + orig_start_position = orig_ns_to_s_map[ns_start_position] + + if orig_start_position is None: + if verbose_logging: + logger.info("Couldn't map start position") + return orig_text + + orig_end_position = None + if end_position in tok_s_to_ns_map: + ns_end_position = tok_s_to_ns_map[end_position] + if ns_end_position in orig_ns_to_s_map: + orig_end_position = orig_ns_to_s_map[ns_end_position] + + if orig_end_position is None: + if verbose_logging: + logger.info("Couldn't map end position") + return orig_text + + output_text = orig_text[orig_start_position:(orig_end_position + 1)] + return output_text + + +def _get_best_indexes(logits, n_best_size): + """Get the n-best logits from a list.""" + index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) + + best_indexes = [] + for i in range(len(index_and_score)): + if i >= n_best_size: + break + best_indexes.append(index_and_score[i][0]) + return best_indexes + + +def _compute_softmax(scores): + """Compute softmax probability over raw logits.""" + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs
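+
+
+# Illustration only (not part of the module's public behavior): _compute_softmax
+# subtracts the max score before exponentiating for numerical stability, e.g.
+# _compute_softmax([1.0, 2.0, 3.0]) ~= [0.090, 0.245, 0.665].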