diff --git a/llama/README.md b/llama/README.md new file mode 100644 index 0000000..c64ffd0 --- /dev/null +++ b/llama/README.md @@ -0,0 +1 @@ +Code modified from: https://github.com/4AI/LS-LLaMA/tree/main \ No newline at end of file diff --git a/llama/evaluate_models.ipynb b/llama/evaluate_models.ipynb new file mode 100644 index 0000000..ef15cf5 --- /dev/null +++ b/llama/evaluate_models.ipynb @@ -0,0 +1,262 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "5d7b1d18", + "metadata": {}, + "source": [ + "### Evaluate Models\n" + ] + }, + { + "cell_type": "markdown", + "id": "80b1b2c7", + "metadata": {}, + "source": [ + "##### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "87dc70f1", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/anaconda/envs/azureml_py310_sdkv2/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "import torch\n", + "import pickle\n", + "import json\n", + "import numpy as np\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "import torch.nn.functional as F\n", + "from sklearn import metrics" + ] + }, + { + "cell_type": "markdown", + "id": "ed3988f7", + "metadata": {}, + "source": [ + "##### Evaluation Parameters" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "2a1fc80e", + "metadata": {}, + "outputs": [], + "source": [ + "threshold = 0.5 # currently we don't maximize val f1 to find the threshold... need to grab scores for all the val sets if we do this\n", + "num_std = 1.96\n", + "num_bootstrap = 1000\n", + "line_width = 2\n", + "alpha = 0.2\n", + "font_size = 16\n", + "legend_size = 10\n", + "x_size = 10\n", + "y_size = 10" + ] + }, + { + "cell_type": "markdown", + "id": "1ac2d927-76e0-48ab-8777-48bc70206d07", + "metadata": {}, + "source": [ + "##### Initialize Score, Model, and Color Arrays" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "acc793b2-80d0-45ac-9a3a-15dcf8fb53fb", + "metadata": {}, + "outputs": [], + "source": [ + "# Define master lists of labels, scores, names, and colors\n", + "all_y_trues, all_y_scores, all_model_names, all_colors = [], [], [], []" + ] + }, + { + "cell_type": "markdown", + "id": "aef4eaf2-fff5-4f83-8ade-2367a2513aa8", + "metadata": {}, + "source": [ + "##### Load Fine-Tuned Torch LM Results" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "26d15d8e-81cc-4cd5-a2dc-765325cb5a55", + "metadata": {}, + "outputs": [], + "source": [ + "# # ls llama 3 8b\n", + "# with open(\"ls-Meta-Llama-3-8B-msp-v2-mdace-20_raw_labels.pkl\", \"rb\") as f:\n", + "# ls_llama_8b_last_labels = pickle.load(f)\n", + "# with open(\"ls-Meta-Llama-3-8B-msp-v2-mdace-20_scores.pkl\", \"rb\") as f:\n", + "# ls_llama_8b_last_scores = pickle.load(f)\n", + "\n", + "# ls_llama_8b_last_scores_transformed = torch.sigmoid(torch.tensor(ls_llama_8b_last_scores))\n", + "# all_model_names.append(\"LS Llama-3 8B (Last)\")\n", + "# all_y_trues.append(ls_llama_8b_last_labels)\n", + "# all_y_scores.append(ls_llama_8b_last_scores_transformed)\n", + "# all_colors.append('#ab20fd')\n", + "\n", + "# ls unllama 3 8b\n", + "with open(\"ls-unllama-Meta-Llama-3-8B-msp-v2-mdace-20_raw_labels.pkl\", \"rb\") as f:\n", + " ls_unllama_8b_max_labels = pickle.load(f)\n", + "with 
open(\"ls-unllama-Meta-Llama-3-8B-msp-v2-mdace-20_raw_scores.pkl\", \"rb\") as f:\n", + " ls_unllama_8b_max_scores = pickle.load(f)\n", + "\n", + "ls_unllama_8b_max_scores_transformed = torch.sigmoid(torch.tensor(ls_unllama_8b_max_scores)).numpy()\n", + "all_model_names.append(\"LS UnLlama-3 8B (Max)\")\n", + "all_y_trues.append(ls_unllama_8b_max_labels)\n", + "all_y_scores.append(ls_unllama_8b_max_scores_transformed)\n", + "\n", + "# BELT Max 5 segments\n", + "with open(\"./BELT-BASELINE/bioclinicalroberta_belt_mdace20_510_step_128_max_5_labels.pkl\", \"rb\") as f:\n", + " belt_5_max_labels = pickle.load(f)\n", + "with open(\"./BELT-BASELINE/bioclinicalroberta_belt_mdace20_510_step_128_max_5_scores.pkl\", \"rb\") as f:\n", + " belt_5_max_scores = pickle.load(f)\n", + "\n", + "all_model_names.append(\"BELT 128 step 5 seg (Max)\")\n", + "all_y_trues.append(belt_5_max_labels)\n", + "all_y_scores.append(belt_5_max_scores)\n", + "\n", + "# BELT Max 128 segments\n", + "with open(\"./BELT-BASELINE/bioclinicalroberta_belt_mdace20_510_step_448_max_128_labels.pkl\", \"rb\") as f:\n", + " belt_128_max_labels = pickle.load(f)\n", + "with open(\"./BELT-BASELINE/bioclinicalroberta_belt_mdace20_510_step_448_max_128_scores.pkl\", \"rb\") as f:\n", + " belt_128_max_scores = pickle.load(f)\n", + "\n", + "all_model_names.append(\"BELT 448 step 128 seg (Max)\")\n", + "all_y_trues.append(belt_128_max_labels)\n", + "all_y_scores.append(belt_128_max_scores)" + ] + }, + { + "cell_type": "markdown", + "id": "6f21cd42", + "metadata": {}, + "source": [ + "##### Print Performance for all Metrics for all Models" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "2665e9ce-07c4-4196-9fe7-7f911123b8f9", + "metadata": {}, + "outputs": [], + "source": [ + "def print_mean_ci_of_metric_list(metric_list, metric_name, num_std):\n", + " mean_metric = np.mean(metric_list)\n", + " std_metric = np.std(metric_list)\n", + " metric_low = np.maximum(mean_metric - std_metric * num_std, 0)\n", + " metric_high = np.minimum(mean_metric + std_metric * num_std, 1)\n", + "\n", + " print(\n", + " f\"{metric_name}: {round(mean_metric, 3)} ([{round(metric_low, 3)} - {round(metric_high, 3)}] 95% CI)\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "4d6ff4a8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Results for LS UnLlama-3 8B (Max)\n", + "\n", + "Micro Average Precision: 0.277 ([0.256 - 0.299] 95% CI)\n", + "Micro ROC AUC: 0.828 ([0.818 - 0.839] 95% CI)\n", + "\n", + "Results for BELT 128 step 5 seg (Max)\n", + "\n", + "Micro Average Precision: 0.707 ([0.698 - 0.716] 95% CI)\n", + "Micro ROC AUC: 0.942 ([0.94 - 0.944] 95% CI)\n", + "\n", + "Results for BELT 448 step 128 seg (Max)\n", + "\n", + "Micro Average Precision: 0.804 ([0.797 - 0.812] 95% CI)\n", + "Micro ROC AUC: 0.971 ([0.969 - 0.972] 95% CI)\n" + ] + } + ], + "source": [ + "model2metric_df = {}\n", + "for y_trues, y_scores, name in zip(\n", + " all_y_trues, all_y_scores, all_model_names\n", + "):\n", + " \n", + " micro_aps, macro_aps, micro_roc_aucs, macro_roc_aucs = [], [], [], []\n", + " for i in range(num_bootstrap):\n", + " \n", + " # Sample N records with replacement where N is the total number of records\n", + " sample_indices = np.random.choice(len(y_trues), len(y_trues))\n", + " sample_labels = np.array(y_trues)[sample_indices]\n", + " sample_scores = np.array(y_scores)[sample_indices]\n", + " \n", + " micro_ap = 
metrics.average_precision_score(y_true=sample_labels, y_score=sample_scores, average='micro')\n", + " micro_aps.append(micro_ap)\n", + "\n", + " # macro_ap = metrics.average_precision_score(y_true=sample_labels, y_score=sample_scores, average='macro')\n", + " # macro_aps.append(macro_ap)\n", + "\n", + " micro_roc_auc = metrics.roc_auc_score(y_true=sample_labels, y_score=sample_scores, average='micro')\n", + " micro_roc_aucs.append(micro_roc_auc)\n", + "\n", + " # macro_roc_auc = metrics.roc_auc_score(y_true=sample_labels, y_score=sample_scores, average='macro')\n", + " # macro_roc_aucs.append(macro_roc_auc)\n", + " \n", + " metric_df = pd.DataFrame({\n", + " \"micro_aps\": micro_aps,\n", + " \"micro_roc_aucs\": micro_roc_aucs,\n", + " })\n", + " model2metric_df[name] = metric_df\n", + "\n", + " print(f\"\\nResults for {name}\\n\")\n", + " print_mean_ci_of_metric_list(micro_aps, metric_name=\"Micro Average Precision\", num_std=num_std)\n", + " print_mean_ci_of_metric_list(micro_roc_aucs, metric_name=\"Micro ROC AUC\", num_std=num_std)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.10 - SDK v2", + "language": "python", + "name": "python310-sdkv2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/llama/ls_llama_seq_clf.py b/llama/ls_llama_seq_clf.py new file mode 100644 index 0000000..ad126bc --- /dev/null +++ b/llama/ls_llama_seq_clf.py @@ -0,0 +1,238 @@ +# -*- coding: utf-8 -*- + +import os +import sys +import yaml +import time +import torch +import pickle +import logging +import transformers +from datasets import DatasetDict, Dataset +from typing import List, Any, Dict +from datasets import load_dataset, load_from_disk +from transformers.data import DataCollatorWithPadding +from transformers import TrainingArguments, Trainer, EarlyStoppingCallback, AutoTokenizer +from peft import get_peft_model, LoraConfig, TaskType +import evaluate +import numpy as np +import pandas as pd +from scipy.special import expit +from sklearn.metrics import average_precision_score + +from utils import check_empty_count_gpus, create_current_run, create_log_dir +from modeling_llama_local import LlamaForSequenceClassification +from modeling_unllama import UnmaskingLlamaForSequenceClassification + +os.environ["HF_EVALUATE_OFFLINE"] = "1" +os.environ["HF_DATASETS_OFFLINE"] = "1" + +# Load Run Parameters +with open("params.yml", "r") as stream: + PARAMS = yaml.safe_load(stream) + +batch_size = PARAMS["batch_size"] +gradient_accumulation_steps = PARAMS["gradient_accumulation_steps"] +learning_rate = PARAMS["learning_rate"] +lora_r = PARAMS["lora_r"] +lora_a = PARAMS["lora_a"] +max_length = PARAMS["max_length"] +warmup_steps = PARAMS["warmup_steps"] +eval_steps = PARAMS["eval_steps"] +save_steps = PARAMS["save_steps"] +logging_steps = PARAMS["logging_steps"] +early_stopping_patience = PARAMS["early_stopping_patience"] +pooling_strategy = PARAMS["pooling_strategy"] +dataset_path = PARAMS["dataset_path"] +train = PARAMS["train"] +resume_training = PARAMS["resume_training"] +resume_checkpoint = PARAMS["resume_checkpoint"] +test_checkpoint = PARAMS["test_checkpoint"] +model_id = PARAMS["model_id"] +output_path = PARAMS["output_path"] +model_name = PARAMS["model_name"] +unllama = PARAMS["unllama"] +id2label = PARAMS["id2label"] + 
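+# Descriptive note (added comment, not original to the script): id2label is read
+# from params.yml above; the inversion on the next line builds the label2id map
+# that the sequence-classification config expects.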
+label2id = {v: k for k, v in id2label.items()} +ds = load_from_disk(dataset_path) + +# # This is to avoid using a map function which seems to be unreliable when used +# # in combination with the preprocess_function. I should understand this better, +# # but I'm just using Pandas for now to ensure we properly transform the label column. +# def wrap_label_column(dataset): +# df = dataset.to_pandas() +# df['label'] = df['label'].apply(lambda x: [int(x)]) +# return Dataset.from_pandas(df) + +# ds = DatasetDict({ +# 'train': wrap_label_column(ds['train']), +# 'val': wrap_label_column(ds['val']), +# 'test': wrap_label_column(ds['test']), +# }) + +# Define Logger +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Log CUDNN, PyTorch, and Transformers versions +logger.info(f"CUDNN version: {torch.backends.cudnn.version()}") +logger.info(f"Torch version: {torch.__version__}") +logger.info(f"Transformers version: {transformers.__version__}") + +# Check, Empty, and Count GPUs +check_empty_count_gpus(logger=logger) + +# Load tokenizer +tokenizer = AutoTokenizer.from_pretrained(PARAMS["tokenizer_id"]) # hot fix + +# llama doesn't have a pad token so we add one as the eos token +tokenizer.pad_token = tokenizer.eos_token + +# Only create a run directory if training a new model +if train: + + # Create Run Directory + current_run_dir = create_current_run( + save_path=output_path, params=PARAMS, logger=logger + ) + logger.info(f"Created run directory: {current_run_dir}.") + + # Create logging dir + logging_dir = create_log_dir(current_run_dir) + + # Set Run Name + run_name = current_run_dir.split("/")[-1] + logger.info(f"Starting run {run_name}...") + +def compute_metrics(eval_pred): + predictions, labels = eval_pred + sigmoid_predictions = expit(predictions) + micro_avg_pr_auc = average_precision_score(labels, sigmoid_predictions, average='micro') + return {"micro_avg_pr_auc": micro_avg_pr_auc} + +def preprocess_function(examples): + + return tokenizer(examples["text"], padding='longest', max_length=max_length, truncation=True) + +tokenized_ds = ds.map(preprocess_function, batched=True) + +# this is messing with things: https://huggingface.co/docs/transformers/en/main_classes/data_collator +# data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + +df = tokenized_ds['train'].to_pandas() +with pd.option_context('display.max_rows', None, 'display.max_columns', None): + print(df.head()) + +# Train +if train: + + if unllama: + model = UnmaskingLlamaForSequenceClassification.from_pretrained(model_id, num_labels=len(label2id), id2label=id2label, label2id=label2id).bfloat16() + model.set_pooling(pooling_strategy) + else: + model = LlamaForSequenceClassification.from_pretrained(model_id, num_labels=len(label2id)).bfloat16() + + # set the pad token of the model's configuration + # https://stackoverflow.com/questions/68084302/assertionerror-cannot-handle-batch-sizes-1-if-no-padding-token-is-defined + model.config.pad_token_id = model.config.eos_token_id + + # # Set problem type explicitly + # model.config.problem_type = "binary_classification" + + peft_config = LoraConfig(task_type=TaskType.SEQ_CLS, inference_mode=False, r=lora_r, lora_alpha=lora_a, lora_dropout=0.1) + model = get_peft_model(model, peft_config) + + training_args = TrainingArguments( + output_dir=current_run_dir, + max_steps=1000000000, + learning_rate=learning_rate, + per_device_train_batch_size=batch_size, + per_device_eval_batch_size=batch_size, + gradient_accumulation_steps=gradient_accumulation_steps, + 
metric_for_best_model="eval_loss", + evaluation_strategy="steps", + save_strategy="steps", + eval_steps=eval_steps, + save_steps=save_steps, + warmup_steps=warmup_steps, + logging_steps=logging_steps, + logging_dir=logging_dir, + lr_scheduler_type='linear', + weight_decay=0.01, + adam_beta1=0.9, + adam_beta2=0.999, + adam_epsilon=0.00000001, + optim="adamw_torch", + load_best_model_at_end=True, + push_to_hub=False, + bf16=True, + bf16_full_eval=True, + gradient_checkpointing=True, + label_names='label' + ) + + # Define early stopping callback + early_stopping = EarlyStoppingCallback(early_stopping_patience=early_stopping_patience) + + trainer = Trainer( + model=model, + args=training_args, + train_dataset=tokenized_ds["train"], + eval_dataset=tokenized_ds["val"], + callbacks=[early_stopping], + tokenizer=tokenizer, + compute_metrics=compute_metrics, + ) + + # data_collator=data_collator, + + # Start training timer + start_time = time.time() + + # Start from model provided above and new training parameters defined above + if not resume_training: + trainer.train() + + # Resume using model and training parameters defined in checkpoint + else: + trainer.train(resume_checkpoint) + + # Log training time + end_time = time.time() + execution_time_hours = round((end_time - start_time) / 3600.0, 2) + logger.info(f"Training took {execution_time_hours} hours.") + + # Save best model + trainer.model.save_pretrained( + os.path.join(current_run_dir, f'{model_name}_model') + ) + tokenizer.save_pretrained( + os.path.join(current_run_dir, f'{model_name}_tokenizer') + ) + +# Test +else: + + if unllama: + model = UnmaskingLlamaForSequenceClassification.from_pretrained(test_checkpoint, num_labels=len(label2id), id2label=id2label, label2id=label2id).bfloat16() + model.set_pooling(pooling_strategy) + else: + model = LlamaForSequenceClassification.from_pretrained(test_checkpoint, num_labels=len(label2id)).bfloat16() + + peft_config = LoraConfig(task_type=TaskType.SEQ_CLS, inference_mode=True, r=lora_r, lora_alpha=lora_a, lora_dropout=0.1) + model = get_peft_model(model, peft_config) + model.print_trainable_parameters() + + trainer = Trainer(model=model) + +# Predict on test data +# Apply softmax or sigmoid to these outputs! +output = trainer.predict(tokenized_ds["test"]) +labels = output.label_ids +probs = torch.tensor(output.predictions) + +with open(f"./{model_name}_raw_scores.pkl", "wb") as f: + pickle.dump(probs.cpu().detach().numpy(), f) +with open(f"./{model_name}_raw_labels.pkl", "wb") as f: + pickle.dump(labels, f) \ No newline at end of file diff --git a/llama/modeling_llama_local.py b/llama/modeling_llama_local.py new file mode 100644 index 0000000..7f6d801 --- /dev/null +++ b/llama/modeling_llama_local.py @@ -0,0 +1,1259 @@ +# Copied by Joel in May of 2024 +# transformers==4.35.1 + +# coding=utf-8 +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch LLaMA model.""" +import math +import warnings +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_causal_attention_mask +from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + logging, + replace_return_docstrings, +) +from transformers.utils.import_utils import is_torch_fx_available +from transformers.models.llama.configuration_llama import LlamaConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph. +# It means that the function will not be traced through and simply appear as a node in the graph. +if is_torch_fx_available(): + _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask) + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "LlamaConfig" + + +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + warnings.warn( + "Calling `transformers.models.llama.modeling_llama._prepare_4d_attention_mask` is deprecated and will be removed in v4.37. Use `transformers.modeling_attn_mask_utils.AttentionMaskConverter._prepare_4d_attention_mask" + ) + return AttentionMaskConverter._prepare_4d_attention_mask(mask=mask, dtype=dtype, tgt_len=tgt_len) + + +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + warnings.warn( + "Calling `transformers.models.llama.modeling_llama._make_causal_mask` is deprecated and will be removed in v4.37. 
Use `transformers.models.llama.modeling_llama.AttentionMaskConverter._make_causal_mask" + ) + return AttentionMaskConverter._make_causal_mask( + input_ids_shape=input_ids_shape, dtype=dtype, device=device, past_key_values_length=past_key_values_length + ) + + +class LlamaRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + LlamaRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm) + + +class LlamaRotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + + +class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): + """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + t = t / self.scaling_factor + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding): + """LlamaRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + + if seq_len > self.max_position_embeddings: + base = self.base * ( + (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) + ) ** (self.dim / (self.dim - 2)) + inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
+ """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class LlamaMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + if self.config.pretraining_tp > 1: + slice = self.intermediate_size // self.config.pretraining_tp + gate_proj_slices = self.gate_proj.weight.split(slice, dim=0) + up_proj_slices = self.up_proj.weight.split(slice, dim=0) + down_proj_slices = self.down_proj.weight.split(slice, dim=1) + + gate_proj = torch.cat( + [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1 + ) + up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1) + + intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2) + down_proj = [ + F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp) + ] + down_proj = sum(down_proj) + else: + down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + return down_proj + + +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class LlamaAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: LlamaConfig): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.is_causal = True + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias) + self._init_rope() + + def _init_rope(self): + if self.config.rope_scaling is None: + self.rotary_emb = LlamaRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + scaling_type = self.config.rope_scaling["type"] + scaling_factor = self.config.rope_scaling["factor"] + if scaling_type == "linear": + self.rotary_emb = LlamaLinearScalingRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + elif scaling_type == "dynamic": + self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + else: + raise ValueError(f"Unknown RoPE scaling type {scaling_type}") + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure use `attention_mask` instead.`" + ) + + bsz, q_len, _ = hidden_states.size() + + if self.config.pretraining_tp > 1: + key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp + query_slices = self.q_proj.weight.split( + (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0 + ) + key_slices = self.k_proj.weight.split(key_value_slicing, dim=0) + value_slices = self.v_proj.weight.split(key_value_slicing, dim=0) + + query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)] + query_states = torch.cat(query_states, dim=-1) + + key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)] + key_states = torch.cat(key_states, dim=-1) + + value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)] + value_states = torch.cat(value_states, dim=-1) + + else: + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + if self.config.pretraining_tp > 1: + attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2) + o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1) + attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)]) + else: + attn_output = 
self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class LlamaFlashAttention2(LlamaAttention): + """ + Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + # LlamaFlashAttention2 attention does not support output_attentions + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + + # overwrite attention_mask with padding_mask + attention_mask = kwargs.pop("padding_mask") + + output_attentions = False + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + # therefore we just need to keep the original shape + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + # TODO: llama does not have dropout in the config?? + # It is recommended to use dropout with FA according to the docs + # when training. + dropout_rate = 0.0 # if not self.training else self.attn_dropout + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. 
(LlamaRMSNorm handles it correctly) + + input_dtype = query_states.dtype + if input_dtype == torch.float32: + # Handle the case where the model is quantized + if hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_output = self._flash_attention_forward( + query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. 
Default to 1 / sqrt(head_dim) + """ + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=self.is_causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=self.is_causal + ) + + return attn_output + + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +class LlamaDecoderLayer(nn.Module): + def __init__(self, config: LlamaConfig): + super().__init__() + self.hidden_size = config.hidden_size + self.self_attn = ( + LlamaAttention(config=config) + if not getattr(config, "_flash_attn_2_enabled", False) + else LlamaFlashAttention2(config=config) + ) + self.mlp = LlamaMLP(config) + self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): + attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, + query_sequence_length, key_sequence_length)` if default attention is used. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + **kwargs, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +LLAMA_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. 
+ + Parameters: + config ([`LlamaConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", + LLAMA_START_DOCSTRING, +) +class LlamaPreTrainedModel(PreTrainedModel): + config_class = LlamaConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["LlamaDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +LLAMA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
+ + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", + LLAMA_START_DOCSTRING, +) +class LlamaModel(LlamaPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`] + + Args: + config: LlamaConfig + """ + + def __init__(self, config: LlamaConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape[:2] + elif inputs_embeds is not None: + batch_size, seq_length = 
inputs_embeds.shape[:2] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + past_key_values_length = 0 + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + + # embed positions + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + position_ids, + past_key_value, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class LlamaForCausalLM(LlamaPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = LlamaModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + 
return self.model + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, LlamaForCausalLM + + >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + if self.config.pretraining_tp > 1: + lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0) + logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)] + logits = torch.cat(logits, dim=-1) + else: + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + 
past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + """ + The LLaMa Model transformer with a sequence classification head on top (linear layer). + + [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
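+
+    For illustration, with a hypothetical `pad_token_id` of 0 and made-up shapes, the pooled position
+    for each row is selected roughly like this:
+
+    ```python
+    import torch
+
+    logits = torch.randn(1, 5, 3)                  # (batch, seq_len, num_labels) from the score head
+    input_ids = torch.tensor([[5, 8, 3, 0, 0]])    # one sequence with two trailing pad tokens
+    sequence_lengths = torch.eq(input_ids, 0).long().argmax(-1) - 1  # last non-pad index -> tensor([2])
+    pooled_logits = logits[torch.arange(input_ids.shape[0]), sequence_lengths]  # (batch, num_labels)
+    ```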
+ """, + LLAMA_START_DOCSTRING, +) +class LlamaForSequenceClassification(LlamaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = LlamaModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + logits.device + ) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + # print(labels) + # print(labels.dtype) + self.config.problem_type = "single_label_classification" + else: + # print("Running multi-label classification forward pass...") # added 05-15-24 + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + # print(labels) 
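+                # Note: BCEWithLogitsLoss applies the sigmoid internally, so `pooled_logits` stay as raw
+                # scores here and the targets must be floats of the same (batch_size, num_labels) shape.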
+ # print(labels.dtype) + # print(pooled_logits) + # print(pooled_logits.dtype) + # print("Running multi-label classification forward pass...") # added 05-15-24 + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels.float()) # changed labels to labels.float() on 05-15-24 + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) \ No newline at end of file diff --git a/llama/modeling_unllama.py b/llama/modeling_unllama.py new file mode 100644 index 0000000..f8cfa73 --- /dev/null +++ b/llama/modeling_unllama.py @@ -0,0 +1,546 @@ +# -*- coding: utf-8 -*- + +from copy import deepcopy + +from transformers.models.llama.modeling_llama import * +from transformers.modeling_outputs import TokenClassifierOutput + + +# _CONFIG_FOR_DOC = "MeditronLlamaConfig" + +_CONFIG_FOR_DOC = "LlamaConfig" + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class UnmaskingLlamaModel(LlamaPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`LlamaDecoderLayer`] + + Args: + config: LlamaConfig + """ + + def __init__(self, config: LlamaConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + 
position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + # embed positions + if attention_mask is None: + attention_mask = torch.ones( + (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device + ) + # causal mask + ''' + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + print('unmasking attention mask:') + print(attention_mask) + ''' + # remove causal mask + attention_mask = torch.zeros( + (batch_size, 1, seq_length, seq_length), device=inputs_embeds.device + ) + + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + position_ids, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +@add_start_docstrings( + """ + The LLaMa Model transformer with a sequence classification head on top (linear layer). + + [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
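+
+    In addition to the last-token behaviour described above, this variant wraps [`UnmaskingLlamaModel`]
+    (which removes the causal mask so every token attends to the full sequence) and supports 'max' and
+    'mean' pooling over the per-token logits via `set_pooling`. A rough usage sketch, with a placeholder
+    checkpoint path and label count:
+
+    ```python
+    model = UnmaskingLlamaForSequenceClassification.from_pretrained("path/to/llama", num_labels=20)
+    model.set_pooling("max")   # one of 'last', 'max', 'mean' (default is 'mean')
+    ```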
+ """, + LLAMA_START_DOCSTRING, +) +class UnmaskingLlamaForSequenceClassification(LlamaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = UnmaskingLlamaModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + self.pooling = 'mean' + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def set_pooling(self, pooling): + self.pooling = pooling + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + logits.device + ) + else: + sequence_lengths = -1 + + if self.pooling == 'last': + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + elif self.pooling == 'max': + pooled_logits, _ = torch.max(logits, dim=1) + elif self.pooling == 'mean': + pooled_logits = torch.mean(logits, dim=1) + else: + raise NotImplementedError + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + # if self.num_labels == 1: + # self.config.problem_type = "binary_classification" + # print("Binary classification problem type") + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + # if self.config.problem_type == "binary_classification": + # loss_fct = BCEWithLogitsLoss() + # loss = loss_fct(pooled_logits, 
labels) + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels.float()) # changed labels to labels.float() on 05-22-24 + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@add_start_docstrings( + """ + The LLaMa Model transformer with a token classification head on top (linear layer). + + [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """, + LLAMA_START_DOCSTRING, +) +class LlamaForTokenClassification(LlamaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = LlamaModel(config) + self.dropout = nn.Dropout(0.1) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + The LLaMa Model transformer with a token classification head on top (linear layer). + + [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """, + LLAMA_START_DOCSTRING, +) +class UnmaskingLlamaForTokenClassification(LlamaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = UnmaskingLlamaModel(config) + self.dropout = nn.Dropout(0.1) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
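+
+        In this head the loss is computed per token, so in practice `labels` carry one entry per input
+        token (shape `(batch_size, sequence_length)`); positions that should not contribute to the loss
+        (special tokens, continuation sub-words, padding) can be set to `-100`, which `CrossEntropyLoss`
+        ignores by default. A hypothetical aligned example:
+
+        ```python
+        tokens = ["<s>", "John", "Smith", "works"]
+        labels = [-100, 1, 2, 0]   # -100 for the special token; 1 / 2 / 0 = B-PER / I-PER / O
+        ```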
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + diff --git a/llama/params.yml b/llama/params.yml new file mode 100644 index 0000000..cc39e64 --- /dev/null +++ b/llama/params.yml @@ -0,0 +1,29 @@ + +dataset_path: '../text_label.hf' +tokenizer_id: 'Meta-Llama-3-8B/mlflow_model_folder/data/model' +model_id: 'mlflow_model_folder/data/model' +output_path: 'output' +model_name: 'Meta-Llama-3-8B' + +train: True +resume_training: False +resume_checkpoint: "" +test_checkpoint: "" +unllama: False # controls autoregressive masking + +batch_size: 2 +gradient_accumulation_steps: 64 +early_stopping_patience: 10 +learning_rate: 0.0001 +lora_r: 16 +lora_a: 32 +max_length: 2048 # was 8192 +warmup_steps: 500 +eval_steps: 200 +save_steps: 200 +logging_steps: 200 +pooling_strategy: 'max' # use max for unllama, LlamaForSequenceClassification uses the last token to do the classification, as other causal models (e.g. GPT-2) do + +id2label: + 0: "Negative" + 1: "Positive" diff --git a/llama/plot_trainer_state.py b/llama/plot_trainer_state.py new file mode 100644 index 0000000..e77c916 --- /dev/null +++ b/llama/plot_trainer_state.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# coding: utf-8 + +""" +Given the path to a trainer state from Hugging Face transformers, plot the learning curves and learning rate. +Plots are saved to the checkpoint directory as well as the working directory for easy access. +This script assumes the number of logging steps, save steps, and eval steps is equal. +It also assumes that validation loss is used as the monitoring metric for checkpointing. 
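+
+An illustrative (abridged) trainer_state.json containing only the fields this script reads, with
+made-up values, would look like:
+
+    {
+        "global_step": 400,
+        "best_metric": 0.41,
+        "log_history": [
+            {"step": 200, "loss": 0.52, "learning_rate": 4.0e-05},
+            {"step": 200, "eval_loss": 0.45},
+            {"step": 400, "loss": 0.47, "learning_rate": 8.0e-05},
+            {"step": 400, "eval_loss": 0.41}
+        ]
+    }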
+""" + +# Open imports +import os +import json +import yaml +import matplotlib.pyplot as plt + +# Set checkpoint path to evaluate +CHECKPOINT_PATH = ( + "../ls-Meta-Llama-3-8B-msp-v2-mdace-20/run_2/checkpoint-8000" +) + + + +def main(): + + # Get working dir + working_dir = os.getcwd() + + # Load trainer state + trainer_state_file = os.path.join(CHECKPOINT_PATH, "trainer_state.json") + with open(trainer_state_file) as f: + states = json.load(f) + + # Get global step and best val loss + global_step = states["global_step"] + best_val_loss = states["best_metric"] + + # Get log history + steps = [] + train_loss = [] + val_loss = [] + lrs = [] + for i, state in enumerate(states["log_history"]): + + # Every other entry in the log history + # Contains the training info + if i % 2 == 0: + steps.append(state["step"]) + train_loss.append(state["loss"]) + lrs.append(state["learning_rate"]) + else: + val_loss.append(state["eval_loss"]) + + # Plot train and eval loss + fig, ax = plt.subplots(figsize=(8, 8)) + ax.plot(steps, train_loss, label="Train Loss") + ax.plot(steps, val_loss, label="Val Loss") + ax.legend(loc="best", prop={"size": 10}) + plt.title(f"Learning Curves at Checkpoint {global_step}", fontsize=10) + plt.xlabel("Step", fontsize=10) + plt.ylabel("Loss", fontsize=10) + plt.tight_layout() + plt.savefig(os.path.join(CHECKPOINT_PATH, "learning_curves.png")) + plt.savefig(os.path.join(working_dir, "learning_curves.png")) + print(f"Best validation loss = {best_val_loss}.") + + # Plot learning rate + fig, ax = plt.subplots(figsize=(8, 8)) + ax.plot(steps, lrs, label="Learning Rate") + ax.legend(loc="best", prop={"size": 10}) + plt.title(f"Learning Rates at Checkpoint {global_step}", fontsize=10) + plt.xlabel("Step", fontsize=10) + plt.ylabel("Learning Rate", fontsize=10) + plt.tight_layout() + plt.savefig(os.path.join(CHECKPOINT_PATH, "learning_rates.png")) + plt.savefig(os.path.join(working_dir, "learning_rates.png")) + print(f"Current learning rate = {lrs[-1]}.") + + +if __name__ == "__main__": + + main() diff --git a/llama/requirements.txt b/llama/requirements.txt new file mode 100644 index 0000000..c4902e2 --- /dev/null +++ b/llama/requirements.txt @@ -0,0 +1,7 @@ +datasets +evaluate +numpy +peft +transformers==4.35.1 +sentencepiece==0.1.99 +torch==1.13.1 \ No newline at end of file diff --git a/llama/unllama_token_clf.py b/llama/unllama_token_clf.py new file mode 100644 index 0000000..ed7c3d0 --- /dev/null +++ b/llama/unllama_token_clf.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- + +import sys +import json +import numpy as np +import evaluate +from datasets import load_dataset, Dataset, DatasetDict +from transformers import AutoTokenizer +from transformers import DataCollatorForTokenClassification +from transformers import TrainingArguments, Trainer +from peft import get_peft_model, LoraConfig, TaskType + +from modeling_llama import UnmaskingLlamaForTokenClassification + + +def load_ontonotesv5(): + ret = {} + for split_name in ['train', 'dev', 'test']: + data = [] + with open(f'./data/ontonotesv5/{split_name}.jsonl', 'r') as reader: + for line in reader: + data.append(json.loads(line)) + ret[split_name] = Dataset.from_list(data) + return DatasetDict(ret) + + +if len(sys.argv) != 3: + print('usage python %.py task model_size') + sys.exit() + +task, model_size = sys.argv[1], sys.argv[2].lower() +print(f'handling task {task}') + +epochs = 10 +batch_size = 8 +learning_rate = 1e-4 +max_length = 64 +lora_r = 12 +if model_size == '7b': + model_id = 'NousResearch/Llama-2-7b-hf' +elif model_size 
== '13b': + model_id = 'NousResearch/Llama-2-13b-hf' +else: + raise NotImplementedError +tokenizer = AutoTokenizer.from_pretrained(model_id) +seqeval = evaluate.load("seqeval") +if task == 'wnut_17': + ds = load_dataset("wnut_17") + label2id = { "O": 0, "B-corporation": 1, "I-corporation": 2, "B-creative-work": 3, "I-creative-work": 4, "B-group": 5, "I-group": 6, "B-location": 7, "I-location": 8, "B-person": 9, "I-person": 10, "B-product": 11, "I-product": 12, } +elif task == 'conll2003': + ds = load_dataset("conll2003") + label2id = {'O': 0, 'B-PER': 1, 'I-PER': 2, 'B-ORG': 3, 'I-ORG': 4, 'B-LOC': 5, 'I-LOC': 6, 'B-MISC': 7, 'I-MISC': 8} +elif task == 'ontonotesv5': + ds = load_ontonotesv5() + label2id = {'O': 0, 'B-NORP': 1, 'B-PERSON': 2, 'B-WORK_OF_ART': 3, 'B-QUANTITY': 4, 'B-EVENT': 5, 'B-DATE': 6, 'B-TIME': 7, 'B-PERCENT': 8, 'B-LANGUAGE': 9, 'B-ORG': 10, 'B-CARDINAL': 11, 'B-LAW': 12, 'B-GPE': 13, 'B-PRODUCT': 14, 'B-LOC': 15, 'B-MONEY': 16, 'B-ORDINAL': 17, 'B-FAC': 18} +else: + raise NotImplementedError +id2label = {v: k for k, v in label2id.items()} +label_list = list(label2id.keys()) # ds["train"].features[f"ner_tags"].feature.names +model = UnmaskingLlamaForTokenClassification.from_pretrained( + model_id, num_labels=len(label2id), id2label=id2label, label2id=label2id +).bfloat16() +peft_config = LoraConfig(task_type=TaskType.TOKEN_CLS, inference_mode=False, r=lora_r, lora_alpha=32, lora_dropout=0.1) +model = get_peft_model(model, peft_config) +model.print_trainable_parameters() + + +def tokenize_and_align_labels(examples): + tokenized_inputs = tokenizer(examples["tokens"], is_split_into_words=True, padding='longest', max_length=max_length, truncation=True) + + labels = [] + for i, label in enumerate(examples[f"ner_tags"]): + word_ids = tokenized_inputs.word_ids(batch_index=i) # Map tokens to their respective word. + previous_word_idx = None + label_ids = [] + for word_idx in word_ids: # Set the special tokens to -100. + if word_idx is None: + label_ids.append(-100) + elif word_idx != previous_word_idx: # Only label the first token of a given word. 
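+                # The word-level tag is assigned only to this first sub-token; continuation sub-tokens
+                # receive -100 below so CrossEntropyLoss ignores them.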
+ label_ids.append(label[word_idx]) + else: + label_ids.append(-100) + previous_word_idx = word_idx + labels.append(label_ids) + + tokenized_inputs["labels"] = labels + return tokenized_inputs + + +tokenized_ds = ds.map(tokenize_and_align_labels, batched=True) +data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer) + + +def compute_metrics(p): + predictions, labels = p + predictions = np.argmax(predictions, axis=2) + + true_predictions = [ + [label_list[p] for (p, l) in zip(prediction, label) if l != -100] + for prediction, label in zip(predictions, labels) + ] + true_labels = [ + [label_list[l] for (p, l) in zip(prediction, label) if l != -100] + for prediction, label in zip(predictions, labels) + ] + + results = seqeval.compute(predictions=true_predictions, references=true_labels) + return { + "precision": results["overall_precision"], + "recall": results["overall_recall"], + "f1": results["overall_f1"], + "accuracy": results["overall_accuracy"], + } + + +training_args = TrainingArguments( + output_dir="my_awesome_ds_model", + learning_rate=learning_rate, + per_device_train_batch_size=batch_size, + per_device_eval_batch_size=batch_size, + num_train_epochs=epochs, + weight_decay=0.01, + evaluation_strategy="epoch", + save_strategy="epoch", + load_best_model_at_end=True, + push_to_hub=False, +) + +trainer = Trainer( + model=model, + args=training_args, + train_dataset=tokenized_ds["train"], + eval_dataset=tokenized_ds["test"], + tokenizer=tokenizer, + data_collator=data_collator, + compute_metrics=compute_metrics, +) + +trainer.train() diff --git a/llama/utils.py b/llama/utils.py new file mode 100644 index 0000000..de73f3d --- /dev/null +++ b/llama/utils.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# coding: utf-8 + +""" +Utility functions for deep learning experiments +""" + +import os +import yaml +import torch +import numpy as np +from sklearn.multiclass import OneVsRestClassifier + + +def create_current_run(save_path, params, logger=None): + """ + Create a directory for the current run, save the + current pipeline parameters, and return the + path to the current run directory. + """ + + # Create current run dir + src_dirs = os.listdir(save_path) + max_run = ( + max([int(dir.split("_")[1]) for dir in src_dirs]) if len(src_dirs) > 0 else -1 + ) + current_run_dir = os.path.join(save_path, "run_" + str(max_run + 1) + "/") + os.makedirs(current_run_dir) + + if logger: + logger.info(f"Created current run dir: {current_run_dir}.") + + # Save run params in current run dir for reference + with open(os.path.join(current_run_dir, "params.yml"), "w") as stream: + yaml.dump(params, stream, default_flow_style=False) + + if logger: + logger.info(f"Saved run parameter to current run dir.") + + return current_run_dir + + +def create_log_dir(current_run_dir, logger=None): + + logging_dir = os.path.join(current_run_dir, "logs/") + + if not os.path.exists(logging_dir): + os.makedirs(logging_dir) + + if logger: + logger.info(f"Created logging directory: {logging_dir}.") + + +def check_empty_count_gpus(logger=None): + """ + Check that GPU is available, empty the cache, + and count the number of available devices. + """ + + # Check that a GPU is available: + assert torch.cuda.is_available(), "No GPU found. Please run on a GPU." 
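+    # Failing fast here avoids silently falling back to CPU training.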
+ + # Empty GPU cache + torch.cuda.empty_cache() + + # Count available devices + device_count = torch.cuda.device_count() + + if logger: + logger.info(f"Found {device_count} GPU(s)!") + + +def convert_1d_binary_labels_to_2d(labels): + """ + Convert 1D binary labels to a 2D representation. + """ + + # Convert a 1D, binary label array to 2D + if isinstance(labels[0], np.integer) or isinstance(labels[0], int): + + # Check that we have a 1D array of 1s and 0s + assert len(np.array(labels).shape), "Expected labels to be 1D." + assert all( + x == 0 or x == 1 for x in labels + ), "Expected only 1s and 0s in labels." + + # Convert to 2D representation + new_labels = np.zeros(shape=(len(labels), 2)) + for i, target in enumerate(labels): + if target == 0: + new_labels[i] = [1, 0] + elif target == 1: + new_labels[i] = [0, 1] + else: + raise ValueError(f"Unexpected target: {target}.") + + return new_labels + + # Return 2D array + else: + + if isinstance(labels, (np.ndarray, np.generic)): + return labels + else: + return np.array(labels) + + +def make_lr_model_and_target_multi_class(model, y, class_strategy, n_jobs=-1): + """ + Given an sklearn LogisticRegression model and + a parameter indicating the multi-class training strategy + convert the model to a OneVsRestClassifier or + multinomial regression and return it with the + n_jobs parameter set to parallelize training. + Also returns the target array such that the final + return type is a tuple of (model, y) and y is + modified to use multi_class indices if + class_strategy='multi_class'. + """ + + if class_strategy == "multi_label": + + # Wrap model in OVR classifier + model = OneVsRestClassifier(model, n_jobs=n_jobs) + + elif class_strategy == "multi_class": + + # Set model attributes + model.multi_class = "multinomial" + model.n_jobs = n_jobs + + # Transform target array + y = transform_target_to_multi_class_indices(y) + + else: + + # Raise exception + raise ValueError( + f"Expected class_strategy to be one of ['multi_label', 'multi_class'] but got {class_strategy}." + ) + + return model, y + + +def transform_target_to_multi_class_indices(y): + """ + Given a 2d numpy array of one hot encoded + targets, return an array of the indices + representing the encoded label for each sample + as is required for sklearn multi-class classification. + """ + + return np.argmax(y, axis=1) diff --git a/long_roberta/README.md b/long_roberta/README.md new file mode 100644 index 0000000..924295f --- /dev/null +++ b/long_roberta/README.md @@ -0,0 +1,27 @@ +# torch_long_bert + +Code to fine-tune and evaluate long versions of BERT and BERT-like LMs from Hugging Face Transformers + +### Contents + +- [About](#about) +- [Environment](#environment) +- [Data Prep](#data-prep) +- [Train and Evaluate](#train-and-evaluate) + +### About + +This repository contains code to fine-tune and evaluate long versions of BERT and BERT-like LMs from Hugging Face Transformers using base PyTorch. The code in this directory has been modified from [this repository](https://github.com/mim-solutions/roberta_for_longer_texts) +and was originally written by [MichalBrzozowski91](https://github.com/MichalBrzozowski91) to implement [this suggestion](https://github.com/google-research/bert/issues/27#issuecomment-435265194) from [jacobdevlin-google](https://github.com/jacobdevlin-google). The core idea is to fine-tune a base BERT model by getting the representations from multiple concatenated windows of text with some overlap and applying sigmoid over each window to generate predictions. 
The final predictions are then taken as either the average or max value of the sigmoid output of all windows in a sample. + +### Environment + +To build the Python 3.10 environment required to run this code, create a Python 3.10 virtual environment with [Anaconda](https://www.anaconda.com/products/individual) and install the dependencies in `../requirements.txt`. + +### Data Prep + +This code takes as input a HuggingFace dataset with text and label columns. + +### Training + +Training and evaluation are combined into one script. After modifying `params.yml`, run `python train_and_evaluate.py` to fine-tune a long version of a BASE BERT model specified in `params.yml`. Make sure the BERT model you wish to fine-tune exists on the file system from which you run `train_and_evaluate.py`. Predictions on the test set are generated after every epoch but only used for the best model checkpoint to compute test set performance. This behavior could be adjusted to improve training efficiency, but because checkpoints are not actually saved, it would be necessary to implement checkpoint saving and loading in the code first. diff --git a/long_roberta/architecture.py b/long_roberta/architecture.py new file mode 100644 index 0000000..82b1e4f --- /dev/null +++ b/long_roberta/architecture.py @@ -0,0 +1,49 @@ +import yaml +import torch +import torch.nn as nn + + +class BERTSequenceClassificationHead(nn.Module): + + def __init__(self): + + super().__init__() + + with open("params.yml", "r") as stream: + params = yaml.safe_load(stream) + self.params = params + + self.out_proj = nn.Linear(params['linear_dim'], self.params['num_labels']) + self.sigmoid = nn.Sigmoid() + + def forward(self, cls_token_hidden_state): + + x = cls_token_hidden_state + x = self.out_proj(x) + x = self.sigmoid(x) + + return x + + +class BERTSequenceClassificationArch(nn.Module): + + def __init__(self, bert): + + super().__init__() + self.bert = bert + self.classification_head = BERTSequenceClassificationHead() + + def forward(self, input_ids, attention_mask): + + x = bert_vectorize(self.bert, input_ids, attention_mask) + x = self.classification_head(x) + return x + + +def bert_vectorize(bert, input_ids, attention_mask): + + outputs = bert(input_ids, attention_mask) + sequence_output = outputs[0] + + vectorized = sequence_output[:, 0, :] # take token (equiv. 
to [CLS]) + return vectorized diff --git a/long_roberta/base_model.py b/long_roberta/base_model.py new file mode 100644 index 0000000..b1df4ba --- /dev/null +++ b/long_roberta/base_model.py @@ -0,0 +1,268 @@ +#!/usr/bin/env python +# coding: utf-8 + +""" +Base PyTorch model code for training and evaluation +""" + +import yaml +import numpy as np +import pandas as pd + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from sklearn.metrics import f1_score +from torch.utils.data import DataLoader, RandomSampler, SequentialSampler + + +class Model(): + """ + Abstract class for models + """ + + def __init__(self): + + with open("params.yml", "r") as stream: + params = yaml.safe_load(stream) + + self.params = params + self.preprocessor = None + self.dataset_class = None + self.collate_fn = None + + def evaluate_single_batch(self, batch, model, device): + + raise NotImplementedError("This is implemented for subclasses only") + + def create_dataset(self, X_preprocessed, y): + + dataset = self.dataset_class(X_preprocessed, y) + + return dataset + + def train_and_evaluate(self, X_train, X_val, X_test, y_train, y_val, y_test, epochs, early_stopping_epochs, logger): + + # Compute number of samples + number_of_train_samples = len(X_train) + number_of_val_samples = len(X_val) + number_of_test_samples = len(X_test) + + # Text preprocessing + X_train_preprocessed = self.preprocessor.preprocess(X_train) + X_val_preprocessed = self.preprocessor.preprocess(X_val) + X_test_preprocessed = self.preprocessor.preprocess(X_test) + + # Creating datasets + train_dataset = self.create_dataset(X_train_preprocessed, y_train) + val_dataset = self.create_dataset(X_val_preprocessed, y_val) + test_dataset = self.create_dataset(X_test_preprocessed, y_test) + + # Creating dataloaders + train_dataloader = create_train_dataloader( + train_dataset, self.params['batch_size'], self.collate_fn) + val_dataloader = create_train_dataloader( + val_dataset, self.params['batch_size'], self.collate_fn) + test_dataloader = create_test_dataloader( + test_dataset, self.params['batch_size'], self.collate_fn) + + # Training and evaluating + result = self.train_and_evaluate_preprocessed( + number_of_train_samples, + train_dataloader, + number_of_val_samples, + val_dataloader, + number_of_test_samples, + test_dataloader, + epochs, + early_stopping_epochs, + logger + ) + + return result + + def train_and_evaluate_preprocessed( + self, + number_of_train_samples, + train_dataloader, + number_of_val_samples, + val_dataloader, + number_of_test_samples, + test_dataloader, + epochs, + early_stopping_epochs, + logger + ): + + result = { + 'train_loss': [], + 'val_loss': [], + 'test_preds': [], + 'test_labels': [] + } + + for epoch in range(epochs): + + # Run train epoch + avg_loss, avg_lr = self.train_single_epoch(number_of_train_samples, train_dataloader) + logger.info(f'Epoch: {epoch}, Train Loss: {avg_loss:.10f}, Avg LR: {avg_lr:.10f}') + result['train_loss'].append(avg_loss) + + # Evaluate + avg_loss, _, _ = self.evaluate_single_epoch(number_of_val_samples, val_dataloader) + logger.info(f'Epoch: {epoch}, Val Loss: {avg_loss:.10f}') + result['val_loss'].append(avg_loss) + + # Predict on test set and save (we should really only do this at the end but need to save the model somehow first) + preds, labels = self.predict(number_of_test_samples, test_dataloader, with_labels=True) + result['test_preds'].append(preds) + result['test_labels'].append(labels) + + # Compute best epoch + best_epoch = np.argmin(result['val_loss']) + 
best_val_loss = np.min(result["val_loss"]) + epochs_since_best = result['val_loss'][best_epoch:] + + # Early stop if too many epochs have passed since the best epoch (we should also checkpoint the model here) + if len(epochs_since_best) > early_stopping_epochs: + logger.info(f"Stopping at epoch {epoch}. Best val loss of {best_val_loss:.10f} occurred at epoch {best_epoch}.") + return result + + return result + + def predict(self, number_of_test_samples, test_dataloader, with_labels=False): + + # Predict on test data loader + _, preds, labels = self.evaluate_single_epoch(number_of_test_samples, test_dataloader) + + # Return labels if specificed + if with_labels: + return preds, labels + else: + return preds + + def train_single_epoch(self, number_of_train_samples, train_dataloader): + + model = self.nn + model.train() + + total_loss = 0 + # total_micro_f1 = 0 + # total_macro_f1 = 0 + total_lr = 0 + + # Iterate over batches + for step, batch in enumerate(train_dataloader): + + preds, labels = self.evaluate_single_batch(batch, model, self.params['device']) + + # Compute the loss between actual and predicted values + loss = compute_loss(preds, labels) + + # Backward pass to calculate the gradients + loss.backward() + + # Add to total loss + total_loss += loss.detach().cpu().numpy() + + # # Accumulate gradients + # step_plus_one = step + 1 + if (step + 1) % self.params['accumulation_steps'] == 0: + + # Update parameters + self.optimizer.step() + self.scheduler.step() + + # Zero the parameter gradients + self.optimizer.zero_grad() + + # Add LR at step + total_lr += self.optimizer.param_groups[0]['lr'] + + # Compute the train loss of the epoch + avg_loss = total_loss / number_of_train_samples + avg_lr = total_lr / number_of_train_samples + + return avg_loss, avg_lr + + def evaluate_single_epoch(self, val_samples, val_dataloader): + + model = self.nn + model.eval() + + total_loss = 0 + preds_total = [] + labels_total = [] + + # Iterate over batches + for step, batch in enumerate(val_dataloader): + + # Deactivate autograd + with torch.no_grad(): + + # Generate predictions + preds, labels = self.evaluate_single_batch(batch, model, self.params['device']) + preds_total.extend(preds) + labels_total.extend(labels) + + # Compute the validation loss between actual and predicted values + loss = compute_loss(preds, labels) + total_loss += loss.detach().cpu().numpy() + + # Compute the evaluation loss of the epoch + preds_total = [x.tolist() for x in preds_total] + labels_total = [x.tolist() for x in labels_total] + avg_loss = total_loss / val_samples + + return avg_loss, preds_total, labels_total + + +def create_dataloader(data, sampler_class, batch_size, collate_fn=None): + + sampler = sampler_class(data) + dataloader = DataLoader( + data, + sampler=sampler, + batch_size=batch_size, + collate_fn=collate_fn) + + return dataloader + +def create_train_dataloader(train_data, batch_size, collate_fn=None): + + train_dataloader = create_dataloader( + train_data, RandomSampler, batch_size, collate_fn) + + return train_dataloader + +def create_val_dataloader(val_data, batch_size, collate_fn=None): + + val_dataloader = create_dataloader( + val_data, SequentialSampler, batch_size, collate_fn) + + return val_dataloader + +def create_test_dataloader(test_data, batch_size, collate_fn=None): + + test_dataloader = create_dataloader( + test_data, SequentialSampler, batch_size, collate_fn) + + return test_dataloader + + +def create_dataloaders(train_data, val_data, batch_size, collate_fn=None): + + train_dataloader = 
create_train_dataloader( + train_data, batch_size, collate_fn) + val_dataloader = create_val_dataloader(val_data, batch_size, collate_fn) + + return train_dataloader, val_dataloader + + +def compute_loss(preds, labels): + + loss = F.binary_cross_entropy(preds, labels.type_as(preds), reduction='sum') + + return loss + diff --git a/long_roberta/custom_datasets.py b/long_roberta/custom_datasets.py new file mode 100644 index 0000000..6f02aea --- /dev/null +++ b/long_roberta/custom_datasets.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# coding: utf-8 + +""" +Dataset classes and collate functions +""" + +from torch.utils.data import Dataset + +class TextDataset(Dataset): + """ + Dataset for raw texts with labels + """ + + def __init__(self, texts, labels): + self.texts = texts + self.labels = labels + + def __len__(self): + return len(self.labels) + + def __getitem__(self, idx): + return self.texts[idx], self.labels[idx] + + +class TokenizedDataset(Dataset): + """ + Dataset for tokens with labels + """ + + def __init__(self, tokens, labels): + self.input_ids = tokens['input_ids'] + self.attention_mask = tokens['attention_mask'] + self.labels = labels + + def __len__(self): + return len(self.labels) + + def __getitem__(self, idx): + return self.input_ids[idx], self.attention_mask[idx], self.labels[idx] + + +def collate_fn_pooled_tokens(data): + + input_ids = [data[i][0] for i in range(len(data))] + attention_mask = [data[i][1] for i in range(len(data))] + labels = [data[i][2] for i in range(len(data))] + collated = [input_ids, attention_mask, labels] + + return collated diff --git a/long_roberta/evaluate_models.ipynb b/long_roberta/evaluate_models.ipynb new file mode 100644 index 0000000..0a96939 --- /dev/null +++ b/long_roberta/evaluate_models.ipynb @@ -0,0 +1,390 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "5d7b1d18", + "metadata": {}, + "source": [ + "### Evaluate Models\n" + ] + }, + { + "cell_type": "markdown", + "id": "80b1b2c7", + "metadata": {}, + "source": [ + "##### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "87dc70f1", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import pickle\n", + "import json\n", + "import multiprocessing\n", + "import numpy as np\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "import torch.nn.functional as F\n", + "from sklearn import metrics\n", + "from datasets import load_from_disk, Dataset\n", + "from transformers import AutoTokenizer" + ] + }, + { + "cell_type": "markdown", + "id": "ed3988f7", + "metadata": {}, + "source": [ + "##### Evaluation Parameters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2a1fc80e", + "metadata": {}, + "outputs": [], + "source": [ + "threshold = 0.5 # currently we don't maximize val f1 to find the threshold... 
need to grab scores for all the val sets if we do this\n", + "num_std = 1.96\n", + "num_bootstrap = 1000\n", + "line_width = 2\n", + "alpha = 0.2\n", + "font_size = 16\n", + "legend_size = 10\n", + "x_size = 10\n", + "y_size = 10" + ] + }, + { + "cell_type": "markdown", + "id": "1ac2d927-76e0-48ab-8777-48bc70206d07", + "metadata": {}, + "source": [ + "##### Initialize Score, Model, and Color Arrays" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "acc793b2-80d0-45ac-9a3a-15dcf8fb53fb", + "metadata": {}, + "outputs": [], + "source": [ + "# Define master lists of labels, scores, names, and colors\n", + "all_y_trues, all_y_scores, all_model_names, all_colors = [], [], [], []" + ] + }, + { + "cell_type": "markdown", + "id": "aef4eaf2-fff5-4f83-8ade-2367a2513aa8", + "metadata": {}, + "source": [ + "##### Load Fine-Tuned Torch LM Results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26d15d8e-81cc-4cd5-a2dc-765325cb5a55", + "metadata": {}, + "outputs": [], + "source": [ + "file_info = [('a', 'b', 'c'), ('x', 'y', 'z') ]\n", + " \n", + " \n", + "for label_file, score_file, model_name in file_info: \n", + " with open(label_file, \"rb\") as f: \n", + " labels = pickle.load(f) \n", + " with open(score_file, \"rb\") as f: \n", + " scores = pickle.load(f)\n", + " \n", + " # In the case of the 2048 model, get the score for the 1 label\n", + " if \"RoBERTa (2048)\" in model_name:\n", + " scores = scores[:,1]\n", + " \n", + " all_model_names.append(model_name) \n", + " all_y_trues.append(labels) \n", + " all_y_scores.append(scores) " + ] + }, + { + "cell_type": "markdown", + "id": "248ce15f-2ece-4a51-ab68-83889c25be80", + "metadata": {}, + "source": [ + "##### Define Recall at Precision Metric" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d34e75b9-4adc-4160-af56-9fc85a0c217c", + "metadata": {}, + "outputs": [], + "source": [ + "def recall_at_precision(scores, labels, target_precision):\n", + " \n", + " # Compute precision-recall curve \n", + " precision, recall, thresholds = metrics.precision_recall_curve(labels, scores) \n", + "\n", + " # Find the highest recall where precision >= target_precision \n", + " max_recall = recall[np.where(precision >= target_precision)].max() \n", + "\n", + " return max_recall " + ] + }, + { + "cell_type": "markdown", + "id": "6baed460", + "metadata": {}, + "source": [ + "##### Define a Function to Print the Mean and Confidence Interval for a Given Metric" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26a6d07e", + "metadata": {}, + "outputs": [], + "source": [ + "def print_mean_ci_of_metric_list(metric_list, metric_name, num_std):\n", + " mean_metric = np.mean(metric_list)\n", + " std_metric = np.std(metric_list)\n", + " metric_low = np.maximum(mean_metric - std_metric * num_std, 0)\n", + " metric_high = np.minimum(mean_metric + std_metric * num_std, 1)\n", + "\n", + " print(\n", + " f\"{metric_name}: {round(mean_metric, 3)} ([{round(metric_low, 3)} - {round(metric_high, 3)}] 95% CI)\"\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "39681795-9909-4c09-b84d-f229b4663c4c", + "metadata": {}, + "source": [ + "##### Define a Function to Select a Threshold" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "93cc4daf-6656-4518-8f68-21ea77ab3161", + "metadata": {}, + "outputs": [], + "source": [ + "def get_threshold_of_best_val_f1(val_scores, val_labels):\n", + " \n", + " # Find the best threshold by maximizing F1 score\n", + " print(\" Computing 
best threshold for F1 on validation set...\")\n", + " best_val_f1 = 0\n", + " best_threshold = 0\n", + " for int_threshold in range(0, 100, 1):\n", + " threshold = int_threshold / 100\n", + " sample_preds = [1 if x >= threshold else 0 for x in val_probs]\n", + " f1 = metrics.f1_score(y_true=val_labels, y_pred=sample_preds)\n", + " if f1 > best_val_f1:\n", + " print(f\" Found new best F1 {f1:.4f} at threshold {threshold}\")\n", + " best_val_f1 = f1\n", + " best_threshold = threshold\n", + " \n", + " return best_threshold" + ] + }, + { + "cell_type": "markdown", + "id": "6f21cd42", + "metadata": {}, + "source": [ + "##### Print Performance for all Metrics for all Models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4d6ff4a8", + "metadata": {}, + "outputs": [], + "source": [ + "mean_fpr_linspace = np.linspace(0, 1, 100)\n", + "mean_recall_linspace = np.linspace(0, 1, 100)\n", + "\n", + "model2metric_df = {}\n", + "for y_trues, y_scores, name in zip(\n", + " all_y_trues, all_y_scores, all_model_names\n", + "):\n", + " accuracies, recalls, precisions, aps, interp_ps, roc_aucs, interp_tprs, f1s, rs_at_p90, static_fprs, static_tprs = [], [], [], [], [], [], [], [], [], [], []\n", + " for i in range(num_bootstrap):\n", + " \n", + " # Sample N records with replacement where N is the total number of records\n", + " sample_indices = np.random.choice(len(y_trues), len(y_trues))\n", + " sample_labels = np.array(y_trues)[sample_indices]\n", + " sample_scores = np.array(y_scores)[sample_indices]\n", + " \n", + " # Generate thresholded prediction\n", + " # threshold = get_threshold_of_best_val_f1(val_scores=y_val_scores, val_labels=y_val_trues)\n", + " sample_preds = [1 if x >= threshold else 0 for x in sample_scores]\n", + "\n", + " accuracy = metrics.accuracy_score(y_true=sample_labels, y_pred=sample_preds)\n", + " accuracies.append(accuracy)\n", + " \n", + "# recall = metrics.recall_score(y_true=sample_labels, y_pred=sample_preds)\n", + "# recalls.append(recall)\n", + "\n", + "# precision = metrics.precision_score(y_true=sample_labels, y_pred=sample_preds)\n", + "# precisions.append(precision)\n", + " \n", + "# f1 = metrics.f1_score(y_true=sample_labels, y_pred=sample_preds)\n", + "# f1s.append(f1)\n", + " \n", + " ap = metrics.average_precision_score(y_true=sample_labels, y_score=sample_scores)\n", + " aps.append(ap)\n", + " \n", + " p, r, thresholds = metrics.precision_recall_curve(y_true=sample_labels, probas_pred=sample_scores)\n", + " interp_p = np.interp(mean_recall_linspace, np.fliplr([r])[0], np.fliplr([p])[0])\n", + " interp_ps.append(interp_p)\n", + " \n", + " roc_auc = metrics.roc_auc_score(y_true=sample_labels, y_score=sample_scores)\n", + " roc_aucs.append(roc_auc)\n", + " \n", + " fpr, tpr, _ = metrics.roc_curve(y_true=sample_labels, y_score=sample_scores)\n", + " \n", + " if 'GPT-4' in name or 'Text Gen' in name:\n", + " static_fprs.append(fpr[1])\n", + " static_tprs.append(tpr[1])\n", + " else:\n", + " static_fprs.append(None)\n", + " static_tprs.append(None)\n", + " \n", + " interp_tpr = np.interp(mean_fpr_linspace, fpr, tpr)\n", + " interp_tpr[0] = 0.0\n", + " interp_tprs.append(interp_tpr)\n", + " \n", + " r_at_p90 = recall_at_precision(scores=sample_scores, labels=sample_labels, target_precision=0.9)\n", + " rs_at_p90.append(r_at_p90)\n", + "\n", + " # \"recalls\": recalls,\n", + " # \"precisions\": precisions,\n", + " # \"f1s\": f1s,\n", + " \n", + " metric_df = pd.DataFrame({\n", + " \"aps\": aps,\n", + " \"roc_aucs\": roc_aucs,\n", + " })\n", + " 
model2metric_df[name] = metric_df\n", + "\n", + " print(f\"\\nResults for {name}\\n\")\n", + " # print_mean_ci_of_metric_list(recalls, metric_name=\"Recall\", num_std=num_std)\n", + " # print_mean_ci_of_metric_list(precisions, metric_name=\"Precision\", num_std=num_std)\n", + " # print_mean_ci_of_metric_list(f1s, metric_name=\"F1\", num_std=num_std)\n", + " print_mean_ci_of_metric_list(aps, metric_name=\"Average Precision\", num_std=num_std)\n", + " print_mean_ci_of_metric_list(roc_aucs, metric_name=\"ROC AUC\", num_std=num_std)\n", + " \n", + "with open(f\"./model2metric_df.pkl\", \"wb\") as f:\n", + " pickle.dump(model2metric_df, f)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1eb8e9ab-ace6-458b-a596-eb503b3dc8f1", + "metadata": {}, + "outputs": [], + "source": [ + "model2metric_df = {k: v for k, v in model2metric_df.items() if 'Max' not in k}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4cf652e2-a0a0-435e-9e0d-80f04c6e1e17", + "metadata": {}, + "outputs": [], + "source": [ + "def plot_mean_with_95_ci(ax, data, metric, condition): \n", + " \n", + " metric_dict = {'aps': 'PR AUC', 'roc_aucs': 'ROC AUC'} \n", + " filtered_data = {k: v for k, v in data.items() if condition in k} \n", + " \n", + " means = [] \n", + " errors = [] \n", + " colors = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black'] \n", + " for model, df in filtered_data.items(): \n", + " mean = df[metric].mean() \n", + " std = df[metric].std() \n", + " ci = 1.96 * std \n", + " \n", + " means.append(mean) \n", + " errors.append(ci) \n", + " \n", + " y_pos = np.arange(len(filtered_data)) \n", + " \n", + " for i, model in enumerate(filtered_data.keys()): \n", + " ax.barh(y_pos[i], means[i], xerr=errors[i], color=colors[i], capsize=10, label=f'M{i}: {map_model_name(model)}') \n", + " \n", + " ax.set_yticks(y_pos) \n", + " ax.set_yticklabels(['M' + str(i) for i in range(len(filtered_data))]) \n", + " ax.set_xlabel(metric_dict[metric]) \n", + " ax.set_title(f'{metric_dict[metric]} for {condition} Prediction') \n", + "\n", + "conditions = ['x', 'y', 'z'] \n", + "metrics = ['aps', 'roc_aucs'] \n", + " \n", + "fig, axs = plt.subplots(3, 2, figsize=(10, 12)) \n", + " \n", + "for i, condition in enumerate(conditions): \n", + " for j, metric in enumerate(metrics): \n", + " plot_mean_with_95_ci(axs[i][j], model2metric_df, metric, condition) \n", + " \n", + "# Add a single legend for the entire plot \n", + "handles, labels = axs[0][0].get_legend_handles_labels() \n", + "fig.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 1.05), \n", + " ncol=len(handles), fancybox=True, shadow=True) \n", + "\n", + "# Add a single title for the entire plot \n", + "fig.suptitle(\"Test Set Performance (1,000 Bootstrap Iterations)\", fontsize=14, y=1.07) \n", + " \n", + "plt.tight_layout() \n", + "plt.subplots_adjust(top=0.99) \n", + "plt.show() " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "619e6c0b-1aa3-4b90-bc1b-103b69d423c4", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.10 - SDK v2", + "language": "python", + "name": "python310-sdkv2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/long_roberta/main.py 
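The pickled metric frames written above can be reloaded and summarized outside the notebook; a small usage sketch (the file name comes from the cell above, and the 1.96 multiplier mirrors num_std):

import pickle
import numpy as np

with open("./model2metric_df.pkl", "rb") as f:
    model2metric_df = pickle.load(f)

for model_name, metric_df in model2metric_df.items():
    for metric in ("aps", "roc_aucs"):
        # Normal-approximation 95% CI from the bootstrap samples, clipped to [0, 1].
        mean, std = metric_df[metric].mean(), metric_df[metric].std()
        low, high = max(mean - 1.96 * std, 0.0), min(mean + 1.96 * std, 1.0)
        print(f"{model_name} {metric}: {mean:.3f} [{low:.3f} - {high:.3f}] 95% CI")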
b/long_roberta/main.py new file mode 100644 index 0000000..312d24f --- /dev/null +++ b/long_roberta/main.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python +# coding: utf-8 + +""" +Main BERT classes and functions +""" + +import os +import yaml +import torch +import torch.nn as nn +import numpy as np +import pandas as pd +from dataclasses import dataclass +from transformers import PreTrainedTokenizerFast, AutoModel, AdamW +from transformers import AutoTokenizer, AutoModel, BertTokenizer, BertModel +from transformers.optimization import get_linear_schedule_with_warmup +from architecture import BERTSequenceClassificationArch +from base_model import Model +from custom_datasets import TokenizedDataset, collate_fn_pooled_tokens +from text_preprocessors import BERTTokenizer, BERTTokenizerPooled + +class BERTClassificationModel(Model): + def __init__(self): + super().__init__() + + with open("params.yml", "r") as stream: + params = yaml.safe_load(stream) + + self.params = params + tokenizer, bert = load_pretrained_model() + self.preprocessor = BERTTokenizer(tokenizer) + self.dataset_class = TokenizedDataset + self.nn = initialize_model(bert, self.params['device']) + self.optimizer = AdamW( + self.nn.parameters(), + lr=self.params['learning_rate'], + betas=(self.params['adam_beta1'], self.params['adam_beta2']), + weight_decay=self.params['weight_decay'], + eps=self.params['adam_epsilon'] + ) + self.scheduler = get_linear_schedule_with_warmup( + self.optimizer, + num_warmup_steps=self.params['warmup_steps'], + num_training_steps=1000000000000 + ) + + def evaluate_single_batch(self, batch, model, device): + + # Push the batch to gpu + batch = [t.to(device) for t in batch] + + # Predict + model_input = batch[:-1] + labels = batch[-1] + preds = model(*model_input).cpu() + labels = labels.float().cpu() + + return preds, labels + + +class BERTClassificationModelWithPooling(Model): + def __init__(self): + super().__init__() + + with open("params.yml", "r") as stream: + params = yaml.safe_load(stream) + + self.params = params + tokenizer, bert = load_pretrained_model() + self.preprocessor = BERTTokenizerPooled( + tokenizer, params['size'], params['step'], params['minimal_length'], params['max_num_segments'] + ) + self.dataset_class = TokenizedDataset + self.collate_fn = collate_fn_pooled_tokens + self.nn = initialize_model(bert, self.params['device']) + self.optimizer = AdamW( + self.nn.parameters(), + lr=self.params['learning_rate'], + betas=(self.params['adam_beta1'], self.params['adam_beta2']), + weight_decay=self.params['weight_decay'], + eps=self.params['adam_epsilon'] + ) + self.scheduler = get_linear_schedule_with_warmup( + self.optimizer, + num_warmup_steps=self.params['warmup_steps'], + num_training_steps=1000000000000 + ) + + def evaluate_single_batch(self, batch, model, device): + + # Extract elements from batch + input_ids = batch[0] + attention_mask = batch[1] + number_of_chunks = [len(x) for x in input_ids] + labels = batch[2] + + # Concatenate all input_ids into one batch + input_ids_combined = [] + for x in input_ids: + input_ids_combined.extend(x.tolist()) + + input_ids_combined_tensors = torch.stack( + [torch.tensor(x).to(device) for x in input_ids_combined]) + + # Concatenate all attention maska into one batch + attention_mask_combined = [] + for x in attention_mask: + attention_mask_combined.extend(x.tolist()) + attention_mask_combined_tensors = torch.stack( + [torch.tensor(x).to(device) for x in attention_mask_combined]) + + # Get model predictions for the combined batch + preds = model( + 
input_ids_combined_tensors, + attention_mask_combined_tensors + ) + + # Move predictions to CPU + preds = preds.cpu() + + if self.params['num_labels'] > 1: + + # Split result preds into chunks + preds_split = torch.split(preds, number_of_chunks) + + # Pooling - torch.max return tuples where the first element is the aggregate value + if self.params['pooling_strategy'] == 'mean': + pooled_preds = torch.stack([torch.mean(x, dim=0) for x in preds_split]) + elif self.params['pooling_strategy'] == 'max': + pooled_preds = torch.stack([torch.max(x, dim=0)[0] for x in preds_split]) + elif self.params['pooling_strategy'] == 'custom_agg': + c = self.params['custom_agg_c'] + pooled_preds = torch.stack([ + (torch.max(x, dim=0)[0] + torch.mean(x, dim=0) * number_of_chunks[i]/c) / (1 + number_of_chunks[i]/c) for i, x in enumerate(preds_split) + ]) + else: + raise ValueError(f"Expected pooling strategy to be one of ['mean', 'max', 'custom_agg'] but got {self.params['pooling_strategy']}.") + + else: + + # Flatten preds + preds = preds.flatten() + + # Split result preds into chunks + preds_split = torch.split(preds, number_of_chunks) + + # Pooling - torch.max return tuples where the first element is the aggregate value + if self.params['pooling_strategy'] == 'mean': + pooled_preds = torch.stack([torch.mean(x).reshape(1) for x in preds_split]) + elif self.params['pooling_strategy'] == 'max': + pooled_preds = torch.stack([torch.max(x).reshape(1) for x in preds_split]) + else: + raise ValueError(f"Expected pooling strategy to be one of ['mean', 'max'] but got {self.params['pooling_strategy']}.") + + # Move labels to CPU + labels_detached = torch.tensor(labels).float() + + return pooled_preds, labels_detached + +def load_pretrained_model(): + + tokenizer = load_tokenizer() + model = load_bert() + + return tokenizer, model + +def load_tokenizer(): + + with open("params.yml", "r") as stream: + params = yaml.safe_load(stream) + + tokenizer = AutoTokenizer.from_pretrained(params['tokenizer_path']) + + return tokenizer + +def load_bert(): + + with open("params.yml", "r") as stream: + params = yaml.safe_load(stream) + + model = AutoModel.from_pretrained( + params['bert_path'], + num_labels=params['num_labels'], + return_dict=True + ) + + return model + +def initialize_model(bert, device): + + model = BERTSequenceClassificationArch(bert) + model = model.to(device) + model = nn.DataParallel(model) + + return model diff --git a/long_roberta/metrics.py b/long_roberta/metrics.py new file mode 100644 index 0000000..a602fad --- /dev/null +++ b/long_roberta/metrics.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python +# coding: utf-8 + +""" +Bootstrapped multi-label metrics functions for test set evaluation and +function to compute metrics during model training. +""" + +import torch +import numpy as np +import torch.nn.functional as F +from scipy import interp +from sklearn.metrics import ( + precision_recall_curve, + average_precision_score, + auc, + roc_curve, + f1_score, +) + +class BootstrapMultiLabelMetrics(object): + """ + Class containing methods for evaluating performance + of multi-label classifiers by bootstrapping the test set. + + :param labels: 2d numpy array of true labels + :type labels: :class:`numpy.ndarray` + :param preds: 2d numpy array of predicted probabilities for each label + :type preds: :class:`numpy.ndarray` + """ + + def __init__(self, labels, preds): + + self.labels = labels + self.preds = preds + + def assert_2d_array(self): + """ + Check that labels and preds are 2d numpy arrays. 
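To make the three chunk-pooling strategies in evaluate_single_batch concrete, here is a toy sketch on dummy per-chunk logits (the shapes and the value of c are illustrative; torch.max returns a (values, indices) tuple, hence the [0]):

import torch

chunk_logits = torch.randn(3, 4)  # one document split into 3 chunks, 4 labels per chunk
n_chunks, c = chunk_logits.shape[0], 2  # c plays the role of params['custom_agg_c']

mean_pooled = torch.mean(chunk_logits, dim=0)
max_pooled = torch.max(chunk_logits, dim=0)[0]
# custom_agg blends max and mean, weighting the mean more heavily as the chunk count grows.
custom_pooled = (max_pooled + mean_pooled * n_chunks / c) / (1 + n_chunks / c)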
+ """ + + assert_msg = "Make sure labels and preds are 2d numpy arrays. Use np.stack(array) if passing an array of arrays." + assert len(self.labels.shape) == len(self.preds.shape) == 2, assert_msg + + def get_bootstrapped_average_precision(self, n_bootstrap=1000): + """ + Bootstrap sample the predictions and labels to + compute micro and macro average precisions across all + labels with the average and standard deviation of + these values across all boostrap iterations. + + :return: micro_average_precision_mean_stdv, macro_average_precision_mean_stdv + :rtype: (dict, dict) + """ + + # Ensure labels and preds are 2d arrays + self.assert_2d_array() + + # Run bootstrap iterations + micro_average_precision_mean_stdv, macro_average_precision_mean_stdv = {}, {} + micro_average_precisions, macro_average_precisions = [], [] + for i in range(n_bootstrap): + + # Sample N records with replacement where N is the total number of records + sample_indices = np.random.choice(len(self.labels), len(self.labels)) + sample_labels = self.labels[sample_indices] + sample_preds = self.preds[sample_indices] + + micro_average_precision = average_precision_score( + sample_labels, sample_preds, average="micro" + ) + micro_average_precisions.append(micro_average_precision) + + macro_average_precision = average_precision_score( + sample_labels, sample_preds, average="macro" + ) + macro_average_precisions.append(macro_average_precision) + + # Compute means and stdvs + micro_average_precision_mean_stdv["mean"] = np.mean(micro_average_precisions) + micro_average_precision_mean_stdv["stdv"] = np.std(micro_average_precisions) + macro_average_precision_mean_stdv["mean"] = np.mean(macro_average_precisions) + macro_average_precision_mean_stdv["stdv"] = np.std(macro_average_precisions) + + return micro_average_precision_mean_stdv, macro_average_precision_mean_stdv + + def get_bootstrapped_roc_auc(self, n_bootstrap=1000): + """ + Bootstrap sample the predictions and labels to + compute micro and macro ROC AUC across all + labels with the average and standard deviation of + these values across all boostrap iterations. 
+ + :return: micro_roc_auc_mean_stdv, macro_roc_auc_mean_stdv + :rtype: (dict, dict) + """ + + # Ensure labels and preds are 2d arrays + self.assert_2d_array() + + # Get number of classes + n_classes = self.labels.shape[1] + + # Run bootstrap iterations + micro_roc_auc_mean_stdv, macro_roc_auc_mean_stdv = {}, {} + micro_roc_aucs, macro_roc_aucs = [], [] + for i in range(n_bootstrap): + + # Sample N records with replacement where N is the total number of records + sample_indices = np.random.choice(len(self.labels), len(self.labels)) + sample_labels = self.labels[sample_indices] + sample_preds = self.preds[sample_indices] + + # Compute micro average ROC AUC + fpr_micro, tpr_micro, _ = roc_curve( + sample_labels.ravel(), sample_preds.ravel() + ) + micro_roc_auc = auc(fpr_micro, tpr_micro) + micro_roc_aucs.append(micro_roc_auc) + + # Compute fpr, tpr for each class + fpr, tpr = {}, {} + for i in range(n_classes): + fpr[i], tpr[i], _ = roc_curve(sample_labels[:, i], sample_preds[:, i]) + + # Compute macro-average ROC AUC using fprs and tprs + all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) + mean_tpr = np.zeros_like(all_fpr) + for i in range(n_classes): + mean_tpr += interp(all_fpr, fpr[i], tpr[i]) + mean_tpr /= n_classes + macro_roc_auc = auc(all_fpr, mean_tpr) + macro_roc_aucs.append(macro_roc_auc) + + # Compute means and stdvs + micro_roc_auc_mean_stdv["mean"] = np.mean(micro_roc_aucs) + micro_roc_auc_mean_stdv["stdv"] = np.std(micro_roc_aucs) + macro_roc_auc_mean_stdv["mean"] = np.mean(macro_roc_aucs) + macro_roc_auc_mean_stdv["stdv"] = np.std(macro_roc_aucs) + + return micro_roc_auc_mean_stdv, macro_roc_auc_mean_stdv + + def get_bootstrapped_f1(self, n_bootstrap=1000): + """ + Bootstrap sample the predictions and labels to + compute micro and macro F1 across all + labels with the average and standard deviation of + these values across all bootstrap iterations. + + :return: micro_f1_mean_stdv, macro_f1_mean_stdv + :rtype: (dict, dict) + """ + + # Ensure labels and preds are 2d arrays + self.assert_2d_array() + + # Get number of classes + n_classes = self.labels.shape[1] + + # Run bootstrap iterations + threshold = 0.5 + micro_f1_mean_stdv, macro_f1_mean_stdv = {}, {} + micro_f1s, macro_f1s = [], [] + for i in range(n_bootstrap): + + # Sample N records with replacement where N is the total number of records + sample_indices = np.random.choice(len(self.labels), len(self.labels)) + sample_labels = self.labels[sample_indices] + sample_preds = self.preds[sample_indices] + + # Compute f1s + preds_at_threshold = np.array((sample_preds >= threshold), dtype=int) + micro_f1 = f1_score(sample_labels, preds_at_threshold, average="micro") + micro_f1s.append(micro_f1) + macro_f1 = f1_score(sample_labels, preds_at_threshold, average="macro") + macro_f1s.append(macro_f1) + + # Compute means and stdvs + micro_f1_mean_stdv["mean"] = np.mean(micro_f1s) + micro_f1_mean_stdv["stdv"] = np.std(micro_f1s) + macro_f1_mean_stdv["mean"] = np.mean(macro_f1s) + macro_f1_mean_stdv["stdv"] = np.std(macro_f1s) + + return micro_f1_mean_stdv, macro_f1_mean_stdv + + def get_all_bootstrapped_metrics_as_dict(self, n_bootstrap=1000): + """ + Returns all bootstrapped metrics in a nice dictionary. 
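The macro ROC AUC above is formed by interpolating each class's TPR onto the union of FPR points and averaging; a compact sketch of that step on dummy arrays (np.interp is equivalent to the scipy interp imported above):

import numpy as np
from sklearn.metrics import auc, roc_curve

labels = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])
preds = np.array([[0.9, 0.2], [0.3, 0.8], [0.7, 0.6], [0.1, 0.4]])
n_classes = labels.shape[1]

fpr, tpr = {}, {}
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(labels[:, i], preds[:, i])

all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= n_classes
macro_roc_auc = auc(all_fpr, mean_tpr)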
+ :return: metrics_dict + :rtype: dict + """ + + micro_average_precision_mean_stdv, macro_average_precision_mean_stdv = self.get_bootstrapped_average_precision(n_bootstrap=n_bootstrap) + micro_roc_auc_mean_stdv, macro_roc_auc_mean_stdv = self.get_bootstrapped_roc_auc(n_bootstrap=n_bootstrap) + micro_f1_mean_stdv, macro_f1_mean_stdv = self.get_bootstrapped_f1(n_bootstrap=n_bootstrap) + + metrics_dict = {} + metrics_dict["micro_ap_mean"] = micro_average_precision_mean_stdv["mean"] + metrics_dict["micro_ap_stdv"] = micro_average_precision_mean_stdv["stdv"] + metrics_dict["macro_ap_mean"] = macro_average_precision_mean_stdv["mean"] + metrics_dict["macro_ap_stdv"] = macro_average_precision_mean_stdv["stdv"] + metrics_dict["micro_roc_auc_mean"] = micro_roc_auc_mean_stdv["mean"] + metrics_dict["micro_roc_auc_stdv"] = micro_roc_auc_mean_stdv["stdv"] + metrics_dict["macro_roc_auc_mean"] = macro_roc_auc_mean_stdv["mean"] + metrics_dict["macro_roc_auc_stdv"] = macro_roc_auc_mean_stdv["stdv"] + metrics_dict["micro_f1_mean"] = micro_f1_mean_stdv["mean"] + metrics_dict["micro_f1_stdv"] = micro_f1_mean_stdv["stdv"] + metrics_dict["macro_f1_mean"] = macro_f1_mean_stdv["mean"] + metrics_dict["macro_f1_stdv"] = macro_f1_mean_stdv["stdv"] + + return metrics_dict + +def compute_training_metrics(pred, threshold=0.5): + """ + Returns dictionary of metrics computed during training + :return: training_metrics + :rtype: dict + """ + + # Compute f1s + labels = pred.label_ids + preds_at_threshold = np.array((pred.predictions >= threshold), dtype=int) + micro_f1 = f1_score(labels, preds_at_threshold, average="micro") + macro_f1 = f1_score(labels, preds_at_threshold, average="macro") + + # Compute loss + y_prob = torch.tensor(pred.predictions) + y_true = torch.tensor(labels).type_as(y_prob) + loss = F.binary_cross_entropy_with_logits(y_prob, y_true).numpy().item() + + # Build metrics dict + training_metrics = { + 'micro_f1': micro_f1, + 'macro_f1': macro_f1, + 'loss': loss + } + + return training_metrics diff --git a/long_roberta/params.yml b/long_roberta/params.yml new file mode 100644 index 0000000..86f7dde --- /dev/null +++ b/long_roberta/params.yml @@ -0,0 +1,49 @@ +# Data +'dataset_path': 'text_label.hf' + +# Pretrained LM +'tokenizer_path': 'roberta-base' +'bert_path': 'roberta-base' + +# Output LM +'output_path': 'roberta_base_text_only_mean/' + +# Model Name +'model_name': 'roberta_base_text_only_mean' + +# Load from file +'model_load_from_file': False + +# Pooled BERT Parameters +'use_pooled_bert': True +'pooling_strategy': 'mean' # one of ['mean', 'max', 'custom_agg'] +'custom_agg_c': 2 +'size': 510 +'step': 100 +'minimal_length': 1 +'max_num_segments': 5 # Each segment adds another (size - step) bits of information. For 2048 seq len: 5 * (510 - 100) = 2050 + +# Linear layer dim +'linear_dim': 768 #1024 + +# Training Parameters +'epochs': 10000000 +'early_stopping_epochs': 5 +'batch_size': 8 # Warning, from a memory consumption perspective, batches are ragged. This is the min # of chunks used in a forward pass. +'accumulation_steps': 16 # Because of the above, we don't exactly know the effective batch size. 
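A minimal usage sketch for BootstrapMultiLabelMetrics (the import path and the random toy arrays are assumptions; labels and preds must be 2d arrays of the same shape):

import numpy as np
from metrics import BootstrapMultiLabelMetrics  # assuming long_roberta/metrics.py is on the path

labels = np.random.randint(0, 2, size=(200, 3))
preds = np.random.rand(200, 3)

evaluator = BootstrapMultiLabelMetrics(labels=labels, preds=preds)
metrics_dict = evaluator.get_all_bootstrapped_metrics_as_dict(n_bootstrap=100)
print(metrics_dict["micro_ap_mean"], metrics_dict["micro_ap_stdv"])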
+'learning_rate': 0.00005 +'num_labels': 1 +'adam_beta1': 0.9 +'adam_beta2': 0.999 +'adam_epsilon': 0.00000001 +'warmup_steps': 100 +'weight_decay': 0.01 +'seed': 1111 + +# Devices +'device': 'cuda' +'visible_gpus': "0" #"0,1,2,3,4,5,6,7" + +# Test data +test_with_imdb_data: False +imdb_data: 'sample_data/imdb_kaggle.csv' diff --git a/long_roberta/params_example.yml b/long_roberta/params_example.yml new file mode 100644 index 0000000..a43887c --- /dev/null +++ b/long_roberta/params_example.yml @@ -0,0 +1,49 @@ +# Data +'dataset_path': 'text_label.hf' + +# Pretrained LM +'tokenizer_path': 'roberta_512/' +'bert_path': 'checkpoint-500000/' + +# Output LM +'output_path': 'text_only_mean/' + +# Model Name +'model_name': 'text_only_mean' + +# Load from file +'model_load_from_file': False + +# Pooled BERT Parameters +'use_pooled_bert': True +'pooling_strategy': 'mean' # one of ['mean', 'max', 'custom_agg'] +'custom_agg_c': 2 +'size': 510 +'step': 100 +'minimal_length': 1 +'max_num_segments': 5 # Each segment adds another (size - step) bits of information. For 2048 seq len: 5 * (510 - 100) = 2050 + +# Linear layer dim +'linear_dim': 768 #1024 + +# Training Parameters +'epochs': 10000000 +'early_stopping_epochs': 5 +'batch_size': 8 # Warning, from a memory consumption perspective, batches are ragged. This is the min # of chunks used in a forward pass. +'accumulation_steps': 16 # Because of the above, we don't exactly know the effective batch size. +'learning_rate': 0.00005 +'num_labels': 1 +'adam_beta1': 0.9 +'adam_beta2': 0.999 +'adam_epsilon': 0.00000001 +'warmup_steps': 100 +'weight_decay': 0.01 +'seed': 1111 + +# Devices +'device': 'cuda' +'visible_gpus': "0" #"0,1,2,3,4,5,6,7" + +# Test data +test_with_imdb_data: False +imdb_data: 'sample_data/imdb_kaggle.csv' diff --git a/long_roberta/pooling.py b/long_roberta/pooling.py new file mode 100644 index 0000000..d3f36a8 --- /dev/null +++ b/long_roberta/pooling.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python +# coding: utf-8 + +""" +Functions for preparing input for longer texts - based on +https://www.kdnuggets.com/2021/04/apply-transformers-any-length-text.html +""" + +import torch + + +def tokenize_all_text(text, tokenizer): + """ + Tokenizes the entire text without truncation and without special tokens + + Parameters: + text - single str with arbitrary length + tokenizer - object of class transformers.PreTrainedTokenizerFast + + Returns: + tokens - dictionary of the form + { + 'input_ids' : [...] + 'token_type_ids' : [...] + 'attention_mask' : [...] 
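The model classes in main.py read this file with yaml.safe_load; a minimal sketch of that pattern (key names as defined in the params file above):

import yaml

with open("params.yml", "r") as stream:
    params = yaml.safe_load(stream)

# A few of the knobs consumed by BERTClassificationModelWithPooling.
print(params["pooling_strategy"], params["size"], params["step"], params["max_num_segments"])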
+ } + """ + + tokens = tokenizer.encode_plus(text, add_special_tokens=False, + return_tensors='pt') + return tokens + + +def split_overlapping(array, size, step, minimal_length): + """ + Helper function for dividing arrays into overlapping chunks + """ + + result = [array[i:i + size] for i in range(0, len(array), step)] + if len(result) > 1: + + # Ignore chunks with less then minimal_length number of tokens + result = [x for x in result if len(x) >= minimal_length] + + return result + +def split_tokens_into_smaller_chunks(tokens, size, step, minimal_length): + """ + Splits tokens into overlapping chunks with given size and step + """ + + assert size <= 510 + input_id_chunks = split_overlapping( + tokens['input_ids'][0], size, step, minimal_length) + mask_chunks = split_overlapping( + tokens['attention_mask'][0], size, step, minimal_length) + + return input_id_chunks, mask_chunks + +def add_special_tokens_at_beginning_and_end(input_id_chunks, mask_chunks, tokenizer): + """ + Adds special CLS token at the beginning and SEP token at the end of each chunk + """ + + for i in range(len(input_id_chunks)): + input_id_chunks[i] = torch.cat( + [torch.Tensor([tokenizer.cls_token_id]), input_id_chunks[i], torch.Tensor([tokenizer.mask_token_id])]) + mask_chunks[i] = torch.cat( + [torch.Tensor([1]), mask_chunks[i], torch.Tensor([1])]) + +def add_padding_tokens(input_id_chunks, mask_chunks, tokenizer): + """ + Adds padding tokens at the end to make sure that all chunks have exactly 512 tokens + """ + + for i in range(len(input_id_chunks)): + + # get required padding length + pad_len = 512 - input_id_chunks[i].shape[0] + + # check if tensor length satisfies required chunk size + if pad_len > 0: + + # if padding length is more than 0, we must add padding + input_id_chunks[i] = torch.cat([ + input_id_chunks[i], torch.Tensor([tokenizer.pad_token_id] * pad_len) + ]) + mask_chunks[i] = torch.cat([ + mask_chunks[i], torch.Tensor([tokenizer.pad_token_id] * pad_len) + ]) + +def stack_tokens_from_all_chunks(input_id_chunks, mask_chunks): + """ + Reshapes data to a form compatible with BERT model input + """ + + input_ids = torch.stack(input_id_chunks) + attention_mask = torch.stack(mask_chunks) + + return input_ids.long(), attention_mask.int() + + +def transform_text_to_model_input( + text, + tokenizer, + size=510, + step=510, + minimal_length=100): + """ + Transforms the entire text to model input of BERT model + """ + + tokens = tokenize_all_text(text, tokenizer) + input_id_chunks, mask_chunks = split_tokens_into_smaller_chunks( + tokens, size, step, minimal_length) + add_special_tokens_at_beginning_and_end(input_id_chunks, mask_chunks, tokenizer) + add_padding_tokens(input_id_chunks, mask_chunks, tokenizer) + input_ids, attention_mask = stack_tokens_from_all_chunks(input_id_chunks, mask_chunks) + + return [input_ids, attention_mask] diff --git a/long_roberta/prepare_datasets.ipynb b/long_roberta/prepare_datasets.ipynb new file mode 100644 index 0000000..52a100f --- /dev/null +++ b/long_roberta/prepare_datasets.ipynb @@ -0,0 +1,390 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "19b33b3a-5ac1-4a50-a13f-c5810869628a", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/anaconda/envs/azureml_py310_sdkv2/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
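A short usage sketch for the chunking pipeline above (the tokenizer checkpoint and the toy text are placeholders; any fast BERT-style tokenizer with cls, mask, and pad tokens should work):

from transformers import AutoTokenizer
from pooling import transform_text_to_model_input  # assuming long_roberta/pooling.py is on the path

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
long_text = "a very long clinical note " * 500

input_ids, attention_mask = transform_text_to_model_input(long_text, tokenizer, size=510, step=510, minimal_length=100)
print(input_ids.shape, attention_mask.shape)  # (num_chunks, 512) for each tensor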
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "import os\n", + "import re\n", + "import gc\n", + "import multiprocessing\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "from datasets import load_from_disk, Dataset, DatasetDict \n", + "from sklearn.model_selection import train_test_split\n", + "from transformers import AutoTokenizer" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "be7b591c-ceb8-41ab-b2ec-70d88c5b30b2", + "metadata": {}, + "outputs": [], + "source": [ + "data_dir = \"nlp_classification_tasks/\"\n", + "conditions = [\"x\", \"y\", \"z\"]\n", + "time_slice = \"a\"" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "2117c1c8-42ff-4a74-9ea1-936ad9b52089", + "metadata": {}, + "outputs": [], + "source": [ + "val_size = 0.1\n", + "seed = 22\n", + "cols = [\"text\", \"label\"]\n", + "max_seq_len = 8192" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "3947b18d-102c-4ab6-af49-4fced0dc9dfb", + "metadata": {}, + "outputs": [], + "source": [ + "bioclinroberta_path = 'RoBERTa-base-PM-M3-Voc-distill-align-hf/'\n", + "tokenizer = AutoTokenizer.from_pretrained(bioclinroberta_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "17b24dda-6a78-45ef-851a-239251ad27b3", + "metadata": {}, + "outputs": [], + "source": [ + "def plot_token_len_distribution(dataset):\n", + "\n", + " plt.hist([len(ids) for ids in dataset['input_ids']], bins=50)\n", + " plt.ylabel(\"Frequency\")\n", + " plt.xlabel(\"Tokens per Patient\")\n", + " plt.title(\"Distribution of Tokens per Patient\")\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "91c99dec-d620-4816-abab-7dd96f44562e", + "metadata": {}, + "outputs": [], + "source": [ + "def tokenize_text(record, tokenizer, truncate_to):\n", + " \n", + " return {\n", + " 'input_ids': tokenizer(\n", + " record['text'],\n", + " padding=False,\n", + " truncation=True,\n", + " max_length=truncate_to\n", + " )['input_ids']\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "727a5878-b361-45c2-b929-ebedf868e14f", + "metadata": {}, + "outputs": [], + "source": [ + "def tokenize_plot(dataset, tokenizer, truncate_to, batch_size=512, proc_div=2):\n", + " \n", + " num_proc = int(multiprocessing.cpu_count() / proc_div)\n", + " print(f\"Tokenizing with {num_proc} CPU processes...\")\n", + " \n", + " dataset = dataset.map(\n", + " tokenize_text,\n", + " batched=True,\n", + " batch_size=batch_size,\n", + " fn_kwargs={\n", + " \"tokenizer\": tokenizer,\n", + " \"truncate_to\": truncate_to\n", + " },\n", + " num_proc=num_proc\n", + " )\n", + " \n", + " plot_token_len_distribution(dataset)\n", + " dataset.remove_columns('input_ids')\n", + " gc.collect()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "87f84d55-6aa7-47b0-9b44-f08b0bd17443", + "metadata": {}, + "outputs": [], + "source": [ + "def split_on_sole_pipe(input_string): \n", + " \n", + " return re.split(r'(?" 
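For reference, a compact sketch of the datasets.map tokenization pattern used by tokenize_plot above (the in-memory toy Dataset and the roberta-base checkpoint are stand-ins for the real corpus and tokenizer):

from datasets import Dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
dataset = Dataset.from_dict({"text": ["a short note", "another note"], "label": [0, 1]})

def tokenize_text(record, tokenizer, truncate_to):
    return {"input_ids": tokenizer(record["text"], padding=False, truncation=True, max_length=truncate_to)["input_ids"]}

# tokenize_plot additionally passes num_proc=cpu_count()/2 to parallelize the map.
dataset = dataset.map(tokenize_text, batched=True, fn_kwargs={"tokenizer": tokenizer, "truncate_to": 8192})
print(dataset[0]["input_ids"][:5])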
+ ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Saving the dataset (1/1 shards): 100%|██████████| 73248/73248 [00:03<00:00, 21244.96 examples/s]\n", + "Saving the dataset (1/1 shards): 100%|██████████| 8139/8139 [00:00<00:00, 26572.39 examples/s]\n", + "Saving the dataset (1/1 shards): 100%|██████████| 28898/28898 [00:01<00:00, 22993.47 examples/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing data for frailty...\n", + "Train size: 41978\n", + "Val size: 4665\n", + "Test size: 4483\n", + "Tokenizing with 12 CPU processes...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Map (num_proc=12): 100%|██████████| 41978/41978 [00:07<00:00, 5806.34 examples/s] \n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAk0AAAHHCAYAAACiOWx7AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABHq0lEQVR4nO3deVgVdf//8dcBZFEE3ABJBdz3JS3FJTNIVLJcum81SzTaTMstTVvcWqxM01brrqRuK83uMtNCcS8zSxO3ckepZDEVEEtU+Pz+8Md8PeIyIsrS83Fd58qZeZ+Z92fOAV7NmZnjMMYYAQAA4KJciroBAACAkoDQBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhAaAIAALCB0AQU0MSJE+VwOK7Jtm6++WbdfPPN1vSqVavkcDj02WefXZPtDxw4UCEhIddkWwWVlZWl++67T4GBgXI4HBo+fPhV3V7e6//nn39e1e3g8pWE9ytKJkITICk2NlYOh8N6eHp6KigoSJGRkXr11Vd17NixQtnOwYMHNXHiRCUkJBTK+gpTce7Njueff16xsbEaPHiw/vvf/+qee+7JV5MXdC71ODugovAMHDjQaT/7+PioWbNmmjZtmrKzsy9rXUX9fv3ll180ceJE7d+/v0i2j6LhVtQNAMXJ5MmTFRoaqlOnTiklJUWrVq3S8OHDNX36dC1cuFBNmza1ap966imNHTv2stZ/8OBBTZo0SSEhIWrevLnt5y1duvSytlMQF+vtP//5j3Jzc696D1dixYoVatOmjSZMmHDBml69eql27drWdFZWlgYPHqyePXuqV69e1vyAgICr2us/mYeHh959911JUnp6uv73v//pscce008//aS5c+faXk9Rv19/+eUXTZo0STfffDNHtf5BCE3AWbp27apWrVpZ0+PGjdOKFSt022236fbbb9evv/4qLy8vSZKbm5vc3K7uj9Bff/2lsmXLyt3d/apu51LKlClTpNu3Iy0tTQ0bNrxoTdOmTZ2C759//qnBgweradOmuvvuu692i6WeMUYnTpywfkbOx83NzWlfP/zww2rdurXmzZun6dOnKygo6Ir7KAnvV5RMfDwHXMItt9yip59+WgcOHNCcOXOs+ec7pyk+Pl7t27eXn5+fvL29Va9ePT3xxBOSzpyHdMMNN0iSBg0aZH1EERsbK+nMeUuNGzfWxo0bddNNN6ls2bLWc889pylPTk6OnnjiCQUGBqpcuXK6/fbb9dtvvznVhISEaODAgfmee/Y6L9Xb+c4ROX78uEaNGqXq1avLw8ND9erV08svvyxjjFOdw+HQ0KFDtWDBAjVu3FgeHh5q1KiR4uLizr/Dz5GWlqaYmBgFBATI09NTzZo10wcffGAtzzu/KzExUYsXL7Z6v5KPTVasWKEOHTqoXLly8vPz0x133KFff/31ks87cOCAateurcaNGys1NVXSmaMpw4cPt/ZT7dq19eKLLzodCdm/f78cDodefvllvfPOO6pVq5Y8PDx0ww036KeffnLaRkpKigYNGqRq1arJw8NDVatW1R133HHJ8Q4cOFDe3t7at2+fIiMjVa5cOQUFBWny5Mn5XrPc3FzNmDFDjRo1kqenpwICAvTggw/q6NGjTnUhISG67bbbtGTJErVq1UpeXl56++23L7mfzubi4mK9D/fv368jR47oscceU5MmTeTt7S0fHx917dpVmzdvtp5TkPfr5Y7pu+++04033ihPT0/VrFlTH374oVUTGxurf/3rX5KkTp06WdtftWrVZY0dJQ9HmgAb7rnnHj3xxBNaunSp7r///vPWbN++XbfddpuaNm2qyZMny8PDQ3v27NHatWslSQ0aNNDkyZM1fvx4PfDAA+rQoYMkqW3bttY6Dh8+rK5du6pv3766++67L/kx0XPPPSeHw6HHH39caWlpmjFjhiIiIpSQkHDR/9s/l53ezmaM0e23366VK1cqJiZGzZs315IlSzR69Gj98ccfeuWVV5zqv/vuO33++ed6+OGHVb58eb366qvq3bu3kpKSVKlSpQv29ffff+vmm2/Wnj17NHToUIWGhmr+/PkaOHCg0tPTNWzYMDVo0ED//e9/NWLECFWrVk2jRo2SJFWpUsX2+M+2bNkyde3aVTVr1tTEiRP1999/67XXXlO7du30888/X/CjmL179+qWW25RxYoVFR8fr8qVK+uvv/5Sx44d9ccff+jBBx9UjRo19P3332vcuHFKTk7WjBkznNbx8ccf69ixY3rwwQflcDj00ksvqVevXtq3b5919KR3797avn27HnnkEYWEhCgtLU3x8fFKSkq65MdEOTk56tKli9q0aaOXXnpJcXFxmjBhgk6fPq3JkydbdQ8++KBiY2M1aNAgPfroo0pMTNTrr7+uTZs2ae3atU5Hcnbu3Kl+/frpwQcf1P3336969epd9j7fu3evJKlSpUrat2+fFixYoH/9618KDQ1Vamqq3n77bXXs2FG//PKLgoKCLvv9erlj2rNnj+68807FxMQoOjpa77//vgYOHKiWLVuqUaNGuummm/Too4/q1Vdf1RNPPKEGDRpIkvVflGIGgJk9e7aRZH766acL1vj6+poWLVpY0xMmTDBn/wi98sorRpI5dOjQBdfx008/GUlm9uzZ+ZZ17NjRSDKzZs0677K
OHTta0ytXrjSSzHXXXWcyMzOt+Z9++qmRZGbOnGnNCw4ONtHR0Zdc58V6i46ONsHBwdb0ggULjCTz7LPPOtXdeeedxuFwmD179ljzJBl3d3eneZs3bzaSzGuvvZZvW2ebMWOGkWTmzJljzTt58qQJCwsz3t7eTmMPDg42UVFRF13fuQ4dOmQkmQkTJljzmjdvbvz9/c3hw4ed+nVxcTEDBgyw5uW9/ocOHTK//vqrCQoKMjfccIM5cuSIVfPMM8+YcuXKmV27djltd+zYscbV1dUkJSUZY4xJTEw0kkylSpWcnv/ll18aSearr74yxhhz9OhRI8lMnTr1ssZpzJnXUJJ55JFHrHm5ubkmKirKuLu7W+/bb7/91kgyH330kdPz4+Li8s0PDg42kkxcXJztHsqVK2cOHTpkDh06ZPbs2WOef/5543A4TNOmTY0xxpw4ccLk5OQ4PS8xMdF4eHiYyZMnW/Mu5/1akDGtWbPGmpeWlmY8PDzMqFGjrHnz5883kszKlSttjR2lAx/PATZ5e3tf9Co6Pz8/SdKXX35Z4JNQPTw8NGjQINv1AwYMUPny5a3pO++8U1WrVtXXX39doO3b9fXXX8vV1VWPPvqo0/xRo0bJGKNvvvnGaX5ERIRq1aplTTdt2lQ+Pj7at2/fJbcTGBiofv36WfPKlCmjRx99VFlZWVq9enUhjOb/JCcnKyEhQQMHDlTFihWd+r311lvPu1+3bdumjh07KiQkRMuWLVOFChWsZfPnz1eHDh1UoUIF/fnnn9YjIiJCOTk5WrNmjdO6+vTp4/T8vCMoefvJy8tL7u7uWrVqVb6PlewaOnSo9e+8j05PnjypZcuWWT37+vrq1ltvdeq5ZcuW8vb21sqVK53WFxoaqsjISNvbP378uKpUqaIqVaqodu3aeuKJJxQWFqYvvvhC0pmfAReXM3+acnJydPjwYeuj7p9//rlAY77cMTVs2NDa99KZo5b16tW75PsVpR8fzwE2ZWVlyd/f/4LL+/Tpo3fffVf33Xefxo4dq/DwcPXq1Ut33nmn9UfgUq677rrLOum7Tp06TtMOh0O1a9e+6pdBHzhwQEFBQU6BTfq/jycOHDjgNL9GjRr51lGhQoVL/uE/cOCA6tSpk2//XWg7Vypvfef7iKlBgwZasmSJjh8/rnLlylnzu3fvroCAAC1ZskTe3t5Oz9m9e7e2bNlywY8K09LSnKbP3U95ASpvP3l4eOjFF1/UqFGjFBAQoDZt2ui2227TgAEDFBgYeMnxubi4qGbNmk7z6tatK0nWe2b37t3KyMi44Hv93J5DQ0Mvud2zeXp66quvvrLGExoaqmrVqlnLc3NzNXPmTL355ptKTExUTk6OtexiH+VezOWOqaDvV5R+hCbAht9//10ZGRlOl6ufy8vLS2vWrNHKlSu1ePFixcXFad68ebrlllu0dOlSubq6XnI7l3Mekl0XugFnTk6OrZ4Kw4W2Y845Abkk6t27tz744AN99NFHevDBB52W5ebm6tZbb9WYMWPO+9y8wJLHzn4aPny4unfvrgULFmjJkiV6+umnNWXKFK1YsUItWrS4wtGc6dnf318fffTReZefGwAv9z3r6uqqiIiICy5//vnn9fTTT+vee+/VM888o4oVK8rFxUXDhw8v8BHcyx1TaX6/4soQmgAb/vvf/0rSJT+GcHFxUXh4uMLDwzV9+nQ9//zzevLJJ7Vy5UpFREQU+h3Ed+/e7TRtjNGePXucLquvUKGC0tPT8z33wIEDTkcdLqe34OBgLVu2TMeOHXM62rRjxw5reWEIDg7Wli1blJub63S0qbC3c/b2pDMnN59rx44dqly5stNRJkmaOnWq3NzcrJPc77rrLmtZrVq1lJWVddGQUBC1atXSqFGjNGrUKO3evVvNmzfXtGnTnK7uPJ/c3Fzt27fPKazt2rVLkqyTyGvVqqVly5apXbt2VyXEX8pnn32mTp066b333nOan56ersqVK1vTl/N+vRpjulbfBoDihXOagEtYsWKFnnnmGYWGhqp///4XrDty5Ei+eXk33cu723HeH9zzhZiC+PDDD53Os/rss8+UnJysrl27WvNq1aqlH374QSdPnrTmLVq0KN+tCS6nt27duiknJ0evv/660/xXXnlFDofDaftXolu3bkpJSdG8efOseadPn9Zrr70mb29vdezYsVC2k6dq1apq3ry5PvjgA6f9sG3bNi1dulTdunXL9xyHw6F33nlHd955p6Kjo7Vw4UJr2b///W+tW7dOS5Ysyfe89PR0nT59+rL6++uvv3TixAmnebVq1VL58uVt31H77NfMGKPXX39dZcqUUXh4uNVzTk6OnnnmmXzPPX36dKG9dy/E1dU13xGd+fPn648//nCadznv16sxpsL+WUbJwJEm4CzffPONduzYodOnTys1NVUrVqxQfHy8goODtXDhQnl6el7wuZMnT9aaNWsUFRWl4OBgpaWl6c0331S1atXUvn17SWf+wPn5+WnWrFkqX768ypUrp9atW1/2eSF5KlasqPbt22vQoEFKTU3VjBkzVLt2bafbItx333367LPP1KVLF/373//W3r17NWfOHKcTsy+3t+7du6tTp0568skntX//fjVr1kxLly7Vl19+qeHDh+dbd0E98MADevvttzVw4EBt3LhRISEh+uyzz7R27VrNmDEj3zlVhWHq1Knq2rWrwsLCFBMTY91ywNfXVxMnTjzvc1xcXDRnzhz16NFD//73v/X111/rlltu0ejRo7Vw4ULddttt1iXrx48f19atW/XZZ59p//79TkdPLmXXrl0KDw/Xv//9bzVs2FBubm764osvlJqaqr59+17y+Z6enoqLi1N0dLRat26tb775RosXL9YTTzxhfUTVsWNHPfjgg5oyZYoSEhLUuXNnlSlTRrt379b8+fM1c+ZM3XnnnbZ7vly33XabJk+erEGDBqlt27baunWrPvroo3znYl3O+/VqjKl58+ZydXXViy++qIyMDHl4eOiWW2656HmPKAWK8Mo9oNjIu+VA3sPd3d0EBgaaW2+91cycOdPp0vY8595yYPny5eaOO+4wQUFBxt3d3QQFBZl+/frlu9z8yy+/NA0bNjRubm5Ol0x37NjRNGrU6Lz9XeiWA5988okZN26c8ff3N15eXiYqKsocOHAg3/OnTZtmrrvuOuPh4WHatWtnNmzYkG+dF+vt3Eu4jTHm2LFjZsSIESYoKMiUKVPG1KlTx0ydOtXk5uY61UkyQ4YMydfThW6FcK7U1FQzaNAgU7lyZePu7m6aNGly3svMC+uWA8YYs2zZMtOuXTvj5eVlfHx8TPfu3c0vv/ziVHP2LQfy/PXXX6Zjx47G29vb/PDDD8aYM/tp3Lhxpnbt2sbd3d1UrlzZtG3b1rz88svm5MmTxpj/u+XA+W4lcHZ/f/75pxkyZIipX7++KVeunPH19TWtW7c2n3766SXHmne5/969e03nzp1N2bJlTUBAgJkwYUK+S/yNMeadd94xLVu2NF5eXqZ8+fKmSZMmZsyYMebgwYNWzeXu87
weLubEiRNm1KhRpmrVqsbLy8u0a9fOrFu37orfr1c6pvNt/z//+Y+pWbOmcXV15fYD/xAOYzizDQBKu4EDB+qzzz5TVlZWUbcClFic0wQAAGADoQkAAMAGQhMAAIANnNMEAABgA0eaAAAAbCA0AQAA2MDNLQtJbm6uDh48qPLly3N7fQAASghjjI4dO6agoKBLfrk6oamQHDx4UNWrVy/qNgAAQAH89ttvqlat2kVrCE2FJO/rHH777Tf5+PgUcTcAAMCOzMxMVa9e3dbXMhGaCkneR3I+Pj6EJgAAShg7p9ZwIjgAAIANhCYAAAAbCE0AAAA2EJoAAABsIDQBAADYQGgCAACwgdAEAABgA6EJAADABkITAACADYQmAAAAGwhNAAAANhCaAAAAbCA0AQAA2EBoAgAAsIHQBAAAYINbUTcAe0LGLr5kzf4Xoq5BJwAA/DNxpAkAAMAGQhMAAIANhCYAAAAbCE0AAAA2EJoAAABsIDQBAADYQGgCAACwgdAEAABgA6EJAADABkITAACADYQmAAAAGwhNAAAANhCaAAAAbCA0AQAA2EBoAgAAsIHQBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhAaAIAALCB0AQAAGADoQkAAMAGQhMAAIANhCYAAAAbCE0AAAA2EJoAAABsIDQBAADYQGgCAACwgdAEAABgA6EJAADABkITAACADYQmAAAAGwhNAAAANhCaAAAAbCA0AQAA2EBoAgAAsIHQBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhQpKFpypQpuuGGG1S+fHn5+/urR48e2rlzp1PNiRMnNGTIEFWqVEne3t7q3bu3UlNTnWqSkpIUFRWlsmXLyt/fX6NHj9bp06edalatWqXrr79eHh4eql27tmJjY/P188YbbygkJESenp5q3bq1fvzxx0IfMwAAKJmKNDStXr1aQ4YM0Q8//KD4+HidOnVKnTt31vHjx62aESNG6KuvvtL8+fO1evVqHTx4UL169bKW5+TkKCoqSidPntT333+vDz74QLGxsRo/frxVk5iYqKioKHXq1EkJCQkaPny47rvvPi1ZssSqmTdvnkaOHKkJEybo559/VrNmzRQZGam0tLRrszMAAECx5jDGmKJuIs+hQ4fk7++v1atX66abblJGRoaqVKmijz/+WHfeeackaceOHWrQoIHWrVunNm3a6JtvvtFtt92mgwcPKiAgQJI0a9YsPf744zp06JDc3d31+OOPa/Hixdq2bZu1rb59+yo9PV1xcXGSpNatW+uGG27Q66+/LknKzc1V9erV9cgjj2js2LGX7D0zM1O+vr7KyMiQj49PYe8ahYxdfMma/S9EFfp2AQAozS7n73exOqcpIyNDklSxYkVJ0saNG3Xq1ClFRERYNfXr11eNGjW0bt06SdK6devUpEkTKzBJUmRkpDIzM7V9+3ar5ux15NXkrePkyZPauHGjU42Li4siIiKsmnNlZ2crMzPT6QEAAEqvYhOacnNzNXz4cLVr106NGzeWJKWkpMjd3V1+fn5OtQEBAUpJSbFqzg5Mecvzll2sJjMzU3///bf+/PNP5eTknLcmbx3nmjJlinx9fa1H9erVCzZwAABQIhSb0DRkyBBt27ZNc+fOLepWbBk3bpwyMjKsx2+//VbULQEAgKvIragbkKShQ4dq0aJFWrNmjapVq2bNDwwM1MmTJ5Wenu50tCk1NVWBgYFWzblXueVdXXd2zblX3KWmpsrHx0deXl5ydXWVq6vreWvy1nEuDw8PeXh4FGzAAACgxCnSI03GGA0dOlRffPGFVqxYodDQUKflLVu2VJkyZbR8+XJr3s6dO5WUlKSwsDBJUlhYmLZu3ep0lVt8fLx8fHzUsGFDq+bsdeTV5K3D3d1dLVu2dKrJzc3V8uXLrRoAAPDPVqRHmoYMGaKPP/5YX375pcqXL2+dP+Tr6ysvLy/5+voqJiZGI0eOVMWKFeXj46NHHnlEYWFhatOmjSSpc+fOatiwoe655x699NJLSklJ0VNPPaUhQ4ZYR4Ieeughvf766xozZozuvfderVixQp9++qkWL/6/K9JGjhyp6OhotWrVSjfeeKNmzJih48ePa9CgQdd+xwAAgGKnSEPTW2+9JUm6+eabnebPnj1bAwcOlCS98sorcnFxUe/evZWdna3IyEi9+eabVq2rq6sWLVqkwYMHKywsTOXKlVN0dLQmT55s1YSGhmrx4sUaMWKEZs6cqWrVqundd99VZGSkVdOnTx8dOnRI48ePV0pKipo3b664uLh8J4cDAIB/pmJ1n6aSjPs0AQBQ8pTY+zQBAAAUV4QmAAAAGwhNAAAANhCaAAAAbCA0AQAA2EBoAgAAsIHQBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhAaAIAALCB0AQAAGADoQkAAMAGQhMAAIANhCYAAAAbCE0AAAA2EJoAAABsIDQBAADYQGgCAACwgdAEAABgA6EJAADABkITAACADYQmAAAAGwhNAAAANhCaAAAAbCA0AQAA2EBoAgAAsIHQBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhAaAIAALCB0AQAAGADoQkAAMAGQhMAAIANhCYAAAAbCE0AAAA2EJoAAABsIDQBAADYQGgCAACwgdAEAABgA6EJAADABkITAACADYQmAAAAGwhNAAAANhCaAAAAbCA0AQAA2EBoAgAAsIHQBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhAaAIAALCB0AQAAGADoQkAAMCGIg1Na9asUffu3RUUFCSHw6EFCxY4LR84cKAcDofTo0uXLk41R44cUf/+/eXj4yM/Pz/FxMQoKyvLqWbLli3q0KGDPD09Vb16db300kv5epk/f77q168vT09PNWnSRF9//XWhjxcAAJRcRRqajh8/rmbNmumNN964YE2XLl2UnJxsPT755BOn5f3799f27dsVHx+vRYsWac2aNXrggQes5ZmZmercubOCg4O1ceNGTZ06VRMnTtQ777xj1Xz//ffq16+fYmJitGnTJvXo0UM9evTQtm3bCn/QAACgRHIYY0xRNyFJDodDX3zxhXr06GHNGzhwoNLT0/Mdgcrz66+/qmHDhvrpp5/UqlUrSVJcXJy6deum33//XUFBQXrrrbf05JNPKiUlRe7u7pKksWPHasGCBdqxY4ckqU+fPjp+/LgWLVpkrbtNmzZq3ry5Zs2aZav/zMxM+fr6KiMjQz4+PgXYAxcXMnbxJWv2vxBV6NsFAKA0u5y/38X+nKZVq1bJ399f9erV0+DBg3X48GFr2bp16+Tn52cFJkmKiIiQi4uL1q9fb9XcdNNNVmCSpMjISO3cuVNHjx61aiIiIpy2GxkZqXXr1l3NoQEAgBLEragbuJguXbqoV69eCg0N1d69e/XEE0+oa9euWrdunVxdXZWSk
iJ/f3+n57i5ualixYpKSUmRJKWkpCg0NNSpJiAgwFpWoUIFpaSkWPPOrslbx/lkZ2crOzvbms7MzLyisQIAgOKtWIemvn37Wv9u0qSJmjZtqlq1amnVqlUKDw8vws6kKVOmaNKkSUXaAwAAuHaK/cdzZ6tZs6YqV66sPXv2SJICAwOVlpbmVHP69GkdOXJEgYGBVk1qaqpTTd70pWrylp/PuHHjlJGRYT1+++23KxscAAAo1kpUaPr99991+PBhVa1aVZIUFham9PR0bdy40apZsWKFcnNz1bp1a6tmzZo1OnXqlFUTHx+vevXqqUKFClbN8uXLnbYVHx+vsLCwC/bi4eEhHx8fpwcAACi9ijQ0ZWVlKSEhQQkJCZKkxMREJSQkKCkpSVlZWRo9erR++OEH7d+/X8uXL9cdd9yh2rVrKzIyUpLUoEEDdenSRffff79+/PFHrV27VkOHDlXfvn0VFBQkSbrrrrvk7u6umJgYbd++XfPmzdPMmTM1cuRIq49hw4YpLi5O06ZN044dOzRx4kRt2LBBQ4cOveb7BAAAFE9FGpo2bNigFi1aqEWLFpKkkSNHqkWLFho/frxcXV21ZcsW3X777apbt65iYmLUsmVLffvtt/Lw8LDW8dFHH6l+/foKDw9Xt27d1L59e6d7MPn6+mrp0qVKTExUy5YtNWrUKI0fP97pXk5t27bVxx9/rHfeeUfNmjXTZ599pgULFqhx48bXbmcAAIBirdjcp6mk4z5NAACUPKXqPk0AAADFQYFC0759+wq7DwAAgGKtQKGpdu3a6tSpk+bMmaMTJ04Udk8AAADFToFC088//6ymTZtq5MiRCgwM1IMPPqgff/yxsHsDAAAoNgoUmpo3b66ZM2fq4MGDev/995WcnKz27durcePGmj59ug4dOlTYfQIAABSpKzoR3M3NTb169dL8+fP14osvas+ePXrsscdUvXp1DRgwQMnJyYXVJwAAQJG6otC0YcMGPfzww6pataqmT5+uxx57THv37lV8fLwOHjyoO+64o7D6BAAAKFIF+sLe6dOna/bs2dq5c6e6deumDz/8UN26dZOLy5kMFhoaqtjYWIWEhBRmrwAAAEWmQKHprbfe0r333quBAwda3wN3Ln9/f7333ntX1BwAAEBxUaDQtHv37kvWuLu7Kzo6uiCrBwAAKHYKdE7T7NmzNX/+/Hzz58+frw8++OCKmwIAAChuChSapkyZosqVK+eb7+/vr+eff/6KmwIAAChuChSakpKSFBoamm9+cHCwkpKSrrgpAACA4qZAocnf319btmzJN3/z5s2qVKnSFTcFAABQ3BQoNPXr10+PPvqoVq5cqZycHOXk5GjFihUaNmyY+vbtW9g9AgAAFLkCXT33zDPPaP/+/QoPD5eb25lV5ObmasCAAZzTBAAASqUChSZ3d3fNmzdPzzzzjDZv3iwvLy81adJEwcHBhd0fAABAsVCg0JSnbt26qlu3bmH1AgAAUGwVKDTl5OQoNjZWy5cvV1pamnJzc52Wr1ixolCaAwAAKC4KFJqGDRum2NhYRUVFqXHjxnI4HIXdFwAAQLFSoNA0d+5cffrpp+rWrVth9wMAAFAsFeiWA+7u7qpdu3Zh9wIAAFBsFSg0jRo1SjNnzpQxprD7AQAAKJYK9PHcd999p5UrV+qbb75Ro0aNVKZMGafln3/+eaE0BwAAUFwUKDT5+fmpZ8+ehd0LAABAsVWg0DR79uzC7gMAAKBYK9A5TZJ0+vRpLVu2TG+//baOHTsmSTp48KCysrIKrTkAAIDiokBHmg4cOKAuXbooKSlJ2dnZuvXWW1W+fHm9+OKLys7O1qxZswq7TwAAgCJVoCNNw4YNU6tWrXT06FF5eXlZ83v27Knly5cXWnMAAADFRYGONH377bf6/vvv5e7u7jQ/JCREf/zxR6E0BgAAUJwU6EhTbm6ucnJy8s3//fffVb58+StuCgAAoLgpUGjq3LmzZsyYYU07HA5lZWVpwoQJfLUKAAAolQr08dy0adMUGRmphg0b6sSJE7rrrru0e/duVa5cWZ988klh9wgAAFDkChSaqlWrps2bN2vu3LnasmWLsrKyFBMTo/79+zudGA4AAFBaFCg0SZKbm5vuvvvuwuwFAACg2CpQaPrwww8vunzAgAEFagYAAKC4KlBoGjZsmNP0qVOn9Ndff8nd3V1ly5YlNAEAgFKnQFfPHT161OmRlZWlnTt3qn379pwIDgAASqUCf/fcuerUqaMXXngh31EoAACA0qDQQpN05uTwgwcPFuYqAQAAioUCndO0cOFCp2ljjJKTk/X666+rXbt2hdIYAABAcVKg0NSjRw+naYfDoSpVquiWW27RtGnTCqMvAACAYqVAoSk3N7ew+wAAACjWCvWcJgAAgNKqQEeaRo4cabt2+vTpBdkEAABAsVKg0LRp0yZt2rRJp06dUr169SRJu3btkqurq66//nqrzuFwFE6XAAAARaxAoal79+4qX768PvjgA1WoUEHSmRteDho0SB06dNCoUaMKtUkAAICiVqBzmqZNm6YpU6ZYgUmSKlSooGeffZar5wAAQKlUoNCUmZmpQ4cO5Zt/6NAhHTt27IqbAgAAKG4KFJp69uypQYMG6fPPP9fvv/+u33//Xf/73/8UExOjXr16FXaPAAAARa5A5zTNmjVLjz32mO666y6dOnXqzIrc3BQTE6OpU6cWaoMAAADFQYFCU9myZfXmm29q6tSp2rt3rySpVq1aKleuXKE2BwAAUFxc0c0tk5OTlZycrDp16qhcuXIyxhRWXwAAAMVKgULT4cOHFR4errp166pbt25KTk6WJMXExHC7AQAAUCoVKDSNGDFCZcqUUVJSksqWLWvN79Onj+Li4gqtOQAAgOKiQOc0LV26VEuWLFG1atWc5tepU0cHDhwolMYAAACKkwIdaTp+/LjTEaY8R44ckYeHxxU3BQAAUNwUKDR16NBBH374oTXtcDiUm5url156SZ06dSq05gAAAIqLAn0899JLLyk8PFwbNmzQyZMnNWbMGG3fvl1HjhzR2rVrC7tHAACAIlegI02NGzfWrl271L59e91xxx06fvy4evXqpU2bNqlWrVqF3SMAAECRu+wjTadOnVKXLl00a9YsPfnkk1ejJwAAgGLnso80lSlTRlu2bLkavQAAABRbBfp47u6779Z7771X2L0AAAAUWwU6Efz06dN6//33tWzZMrVs2TLfd85Nnz69UJoDAAAoLi7rSNO+ffuUm5urbdu26frrr1f58uW1a9cubdq0yXokJCTYXt+aNWvUvXt3BQUFyeFwaMGCBU7LjTEaP368qlatKi8vL0VERGj37t1ONUeOHFH//v3l4+MjPz8/xcTEKCsry6lmy5Yt6tChgzw9PVW9enW99NJL+XqZP3++6tevL09PTzVp0kRff/217XEAAIDS77JCU506dfTnn39q5cqVWrlypfz9/TV37lxreuXKlVqxYoXt9R0/flzNmjXTG2+8cd7lL730kl599VXNmjVL69evV7ly5RQZGakTJ05YNf37
99f27dsVHx+vRYsWac2aNXrggQes5ZmZmercubOCg4O1ceNGTZ06VRMnTtQ777xj1Xz//ffq16+fYmJitGnTJvXo0UM9evTQtm3bLmf3AACAUsxhjDF2i11cXJSSkiJ/f39Jko+PjxISElSzZs0rb8Th0BdffKEePXpIOnOUKSgoSKNGjdJjjz0mScrIyFBAQIBiY2PVt29f/frrr2rYsKF++ukntWrVSpIUFxenbt266ffff1dQUJDeeustPfnkk0pJSZG7u7skaezYsVqwYIF27Ngh6cx35h0/flyLFi2y+mnTpo2aN2+uWbNm2eo/MzNTvr6+ysjIkI+PzxXvj3OFjF18yZr9L0QV+nYBACjNLufvd4FOBM9zGXnrsiUmJiolJUURERHWPF9fX7Vu3Vrr1q2TJK1bt05+fn5WYJKkiIgIubi4aP369VbNTTfdZAUmSYqMjNTOnTt19OhRq+bs7eTV5G0HAADgsk4Edzgccjgc+eZdDSkpKZKkgIAAp/kBAQHWsrOPeuVxc3NTxYoVnWpCQ0PzrSNvWYUKFZSSknLR7ZxPdna2srOzrenMzMzLGR4AAChhLis0GWM0cOBA60t5T5w4oYceeijf1XOff/554XVYTE2ZMkWTJk0q6jYAAMA1clmhKTo62mn67rvvLtRmzhYYGChJSk1NVdWqVa35qampat68uVWTlpbm9LzTp0/ryJEj1vMDAwOVmprqVJM3famavOXnM27cOI0cOdKazszMVPXq1S9niAAAoAS5rNA0e/bsq9VHPqGhoQoMDNTy5cutkJSZman169dr8ODBkqSwsDClp6dr48aNatmypSRpxYoVys3NVevWra2aJ598UqdOnVKZMmUkSfHx8apXr54qVKhg1SxfvlzDhw+3th8fH6+wsLAL9ufh4WEdcQMAAKXfFZ0IfqWysrKUkJBg3dspMTFRCQkJSkpKksPh0PDhw/Xss89q4cKF2rp1qwYMGKCgoCDrCrsGDRqoS5cuuv/++/Xjjz9q7dq1Gjp0qPr27augoCBJ0l133SV3d3fFxMRo+/btmjdvnmbOnOl0lGjYsGGKi4vTtGnTtGPHDk2cOFEbNmzQ0KFDr/UuAQAAxVSB7gheWDZs2KBOnTpZ03lBJjo6WrGxsRozZoyOHz+uBx54QOnp6Wrfvr3i4uLk6elpPeejjz7S0KFDFR4eLhcXF/Xu3VuvvvqqtdzX11dLly7VkCFD1LJlS1WuXFnjx493updT27Zt9fHHH+upp57SE088oTp16mjBggVq3LjxNdgLAACgJLis+zThwrhPEwAAJc81u08TAADAPwWhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhAaAIAALCB0AQAAGADoQkAAMAGQhMAAIANhCYAAAAbCE0AAAA2EJoAAABsIDQBAADYQGgCAACwgdAEAABgA6EJAADABkITAACADYQmAAAAGwhNAAAANhCaAAAAbCA0AQAA2EBoAgAAsIHQBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhAaAIAALCB0AQAAGADoQkAAMAGQhMAAIANhCYAAAAbCE0AAAA2uBV1Ayg8IWMXX7Jm/wtR16ATAABKH440AQAA2EBoAgAAsIHQBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhAaAIAALCB0AQAAGADoQkAAMAGQhMAAIANhCYAAAAbCE0AAAA2EJoAAABsIDQBAADYQGgCAACwgdAEAABgA6EJAADABkITAACADYQmAAAAGwhNAAAANhCaAAAAbCA0AQAA2EBoAgAAsKFYh6aJEyfK4XA4PerXr28tP3HihIYMGaJKlSrJ29tbvXv3VmpqqtM6kpKSFBUVpbJly8rf31+jR4/W6dOnnWpWrVql66+/Xh4eHqpdu7ZiY2OvxfAAAEAJUqxDkyQ1atRIycnJ1uO7776zlo0YMUJfffWV5s+fr9WrV+vgwYPq1auXtTwnJ0dRUVE6efKkvv/+e33wwQeKjY3V+PHjrZrExERFRUWpU6dOSkhI0PDhw3XfffdpyZIl13ScAACgeHMr6gYuxc3NTYGBgfnmZ2Rk6L333tPHH3+sW265RZI0e/ZsNWjQQD/88IPatGmjpUuX6pdfftGyZcsUEBCg5s2b65lnntHjjz+uiRMnyt3dXbNmzVJoaKimTZsmSWrQoIG+++47vfLKK4qMjLymYwUAAMVXsT/StHv3bgUFBalmzZrq37+/kpKSJEkbN27UqVOnFBERYdXWr19fNWrU0Lp16yRJ69atU5MmTRQQEGDVREZGKjMzU9u3b7dqzl5HXk3eOgAAAKRifqSpdevWio2NVb169ZScnKxJkyapQ4cO2rZtm1JSUuTu7i4/Pz+n5wQEBCglJUWSlJKS4hSY8pbnLbtYTWZmpv7++295eXmdt7fs7GxlZ2db05mZmVc0VgAAULwV69DUtWtX699NmzZV69atFRwcrE8//fSCYeZamTJliiZNmlSkPQAAgGun2H88dzY/Pz/VrVtXe/bsUWBgoE6ePKn09HSnmtTUVOscqMDAwHxX0+VNX6rGx8fnosFs3LhxysjIsB6//fbblQ4PAAAUYyUqNGVlZWnv3r2qWrWqWrZsqTJlymj58uXW8p07dyopKUlhYWGSpLCwMG3dulVpaWlWTXx8vHx8fNSwYUOr5ux15NXkreNCPDw85OPj4/QAAAClV7EOTY899phWr16t/fv36/vvv1fPnj3l6uqqfv36ydfXVzExMRo5cqRWrlypjRs3atCgQQoLC1ObNm0kSZ07d1bDhg11zz33aPPmzVqyZImeeuopDRkyRB4eHpKkhx56SPv27dOYMWO0Y8cOvfnmm/r00081YsSIohw6AAAoZor1OU2///67+vXrp8OHD6tKlSpq3769fvjhB1WpUkWS9Morr8jFxUW9e/dWdna2IiMj9eabb1rPd3V11aJFizR48GCFhYWpXLlyio6O1uTJk62a0NBQLV68WCNGjNDMmTNVrVo1vfvuu9xuAAAAOHEYY0xRN1EaZGZmytfXVxkZGVflo7qQsYsLZT37X4gqlPUAAFAaXM7f72L98RwAAEBxQWgCAACwgdAEAABgA6EJAADABkITAACADYQmAAAAGwhNAAAANhCaAAAAbCA0AQAA2EBoAgAAsIHQBAAAYAOhCQAAwAZCEwAAgA1uRd0Arq2QsYsvWbP/hahr0AkAACULR5oAAABsIDQBAADYQGgCAACwgdAEAABgA6EJAADABkITAACADYQmAAAAGwhNAAAANhCaAAAAbCA0AQAA2EBoAgAAsIHQBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhAaAIAALCB0AQAAGADoQkAAMAGQhMAAIANhCYAAAAb3Iq6ARQ/IWMXX7Jm/wtR16ATAACKD440AQAA2EBoAgAAsIHQBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADY
QmgAAAGwgNAEAANhAaAIAALCBr1FBgfBVKwCAfxqONAEAANhAaAIAALCB0AQAAGADoQkAAMAGQhMAAIANhCYAAAAbCE0AAAA2cJ8mXDXcywkAUJpwpAkAAMAGQhMAAIANhCYAAAAbOKcJRYrzngAAJQVHmgAAAGwgNAEAANjAx3Mo9vgIDwBKv5Lwu57QdI433nhDU6dOVUpKipo1a6bXXntNN954Y1G3hUsoCT9sAICSjdB0lnnz5mnkyJGaNWuWWrdurRkzZigyMlI7d+6Uv79/UbeHK0SwAgBcCULTWaZPn677779fgwYNkiTNmjVLixcv1vvvv6+xY8cWcXe4FuwEKzsIXwBQ+hCa/r+TJ09q48aNGjdunDXPxcVFERERWrduXRF2hpKI8AUApQ+h6f/7888/lZOTo4CAAKf5AQEB2rFjR7767OxsZWdnW9MZGRmSpMzMzKvSX272X1dlvSjeaoyYX9QtALZsmxR5yZrGE5Zcs22h5LHzd+5q/I3NW6cx5pK1hKYCmjJliiZNmpRvfvXq1YugGwAoWr4zSue2ULxczdf+2LFj8vX1vWgNoen/q1y5slxdXZWamuo0PzU1VYGBgfnqx40bp5EjR1rTubm5OnLkiCpVqiSHw1FofWVmZqp69er67bff5OPjU2jrLQ4YW8lUmscmle7xMbaSibFdXcYYHTt2TEFBQZesJTT9f+7u7mrZsqWWL1+uHj16SDoThJYvX66hQ4fmq/fw8JCHh4fTPD8/v6vWn4+PT6n7YcnD2Eqm0jw2qXSPj7GVTIzt6rnUEaY8hKazjBw5UtHR0WrVqpVuvPFGzZgxQ8ePH7eupgMAAP9chKaz9OnTR4cOHdL48eOVkpKi5s2bKy4uLt/J4QAA4J+H0HSOoUOHnvfjuKLi4eGhCRMm5PsosDRgbCVTaR6bVLrHx9hKJsZWfDiMnWvsAAAA/uFciroBAACAkoDQBAAAYAOhCQAAwAZCEwAAgA2EpmLujTfeUEhIiDw9PdW6dWv9+OOPRd2SkzVr1qh79+4KCgqSw+HQggULnJYbYzR+/HhVrVpVXl5eioiI0O7du51qjhw5ov79+8vHx0d+fn6KiYlRVlaWU82WLVvUoUMHeXp6qnr16nrppZeu9tA0ZcoU3XDDDSpfvrz8/f3Vo0cP7dy506nmxIkTGjJkiCpVqiRvb2/17t07313lk5KSFBUVpbJly8rf31+jR4/W6dOnnWpWrVql66+/Xh4eHqpdu7ZiY2Ov6tjeeustNW3a1LqhXFhYmL755psSP67zeeGFF+RwODR8+HBrXkkd38SJE+VwOJwe9evXL/HjyvPHH3/o7rvvVqVKleTl5aUmTZpow4YN1vKS/PskJCQk32vncDg0ZMgQSSX3tcvJydHTTz+t0NBQeXl5qVatWnrmmWecvsetJL9u+RgUW3PnzjXu7u7m/fffN9u3bzf333+/8fPzM6mpqUXdmuXrr782Tz75pPn888+NJPPFF184LX/hhReMr6+vWbBggdm8ebO5/fbbTWhoqPn777+tmi5duphmzZqZH374wXz77bemdu3apl+/ftbyjIwMExAQYPr372+2bdtmPvnkE+Pl5WXefvvtqzq2yMhIM3v2bLNt2zaTkJBgunXrZmrUqGGysrKsmoceeshUr17dLF++3GzYsMG0adPGtG3b1lp++vRp07hxYxMREWE2bdpkvv76a1O5cmUzbtw4q2bfvn2mbNmyZuTIkeaXX34xr732mnF1dTVxcXFXbWwLFy40ixcvNrt27TI7d+40TzzxhClTpozZtm1biR7XuX788UcTEhJimjZtaoYNG2bNL6njmzBhgmnUqJFJTk62HocOHSrx4zLGmCNHjpjg4GAzcOBAs379erNv3z6zZMkSs2fPHqumJP8+SUtLc3rd4uPjjSSzcuVKY0zJfe2ee+45U6lSJbNo0SKTmJho5s+fb7y9vc3MmTOtmpL8up2L0FSM3XjjjWbIkCHWdE5OjgkKCjJTpkwpwq4u7NzQlJubawIDA83UqVOteenp6cbDw8N88sknxhhjfvnlFyPJ/PTTT1bNN998YxwOh/njjz+MMca8+eabpkKFCiY7O9uqefzxx029evWu8oicpaWlGUlm9erVxpgzYylTpoyZP3++VfPrr78aSWbdunXGmDOh0sXFxaSkpFg1b731lvHx8bHGM2bMGNOoUSOnbfXp08dERkZe7SE5qVChgnn33XdLzbiOHTtm6tSpY+Lj403Hjh2t0FSSxzdhwgTTrFmz8y4ryeMy5szPdPv27S+4vLT9Phk2bJipVauWyc3NLdGvXVRUlLn33nud5vXq1cv079/fGFP6Xjc+niumTp48qY0bNyoiIsKa5+LiooiICK1bt64IO7MvMTFRKSkpTmPw9fVV69atrTGsW7dOfn5+atWqlVUTEREhFxcXrV+/3qq56aab5O7ubtVERkZq586dOnr06DUajZSRkSFJqlixoiRp48aNOnXqlNP46tevrxo1ajiNr0mTJk53lY+MjFRmZqa2b99u1Zy9jryaa/U65+TkaO7cuTp+/LjCwsJKzbiGDBmiqKiofD2U9PHt3r1bQUFBqlmzpvr376+kpKRSMa6FCxeqVatW+te//iV/f3+1aNFC//nPf6zlpen3ycmTJzVnzhzde++9cjgcJfq1a9u2rZYvX65du3ZJkjZv3qzvvvtOXbt2lVS6XjeJc5qKrT///FM5OTn5vsIlICBAKSkpRdTV5cnr82JjSElJkb+/v9NyNzc3VaxY0anmfOs4extXW25uroYPH6527dqpcePG1rbd3d3zfVHzueO7VO8XqsnMzNTff/99NYYjSdq6dau8vb3l4eGhhx56SF988YUaNmxY4sclSXPnztXPP/+sKVOm5FtWksfXunVrxcbGKi4uTm+99ZYSExPVoUMHHTt2rESPS5L27dunt956S3Xq1NGSJUs0ePBgPfroo/rggw+c+isNv08WLFig9PR0DRw40NpuSX3txo4dq759+6p+/foqU6aMWrRooeHDh6t///5OvZWG103ia1QAW4YMGaJt27bpu+++K+pWCk29evWUkJCgjIwMffbZZ4qOjtbq1auLuq0r9ttvv2nYsGGKj4+Xp6dnUbdTqPL+712SmjZtqtatWys4OFiffvqpvLy8irCzK5ebm6tWrVrp+eeflyS1aNFC27Zt06xZsxQdHV3E3RWu9957T127dlVQUFBRt3LFPv30U3300Uf6+OOP1ahRIyUkJGj48OEKCgoqda+bxJGmYqty5cpydXXNd/VEamqqAgMDi6iry5PX58XGEBgYqLS0NKflp0+f1pEjR5xqzreOs7dxNQ0dOlSLFi3SypUrVa1aNWt+YGCgTp48qfT09Hy9XU7vF6rx8fG5qn8I3d3dVbt2bbVs2VJTpkxRs2bNNHPmzBI/ro0bNyotLU3XX3+93Nzc5ObmptWrV+vVV1+Vm5ubAgICSvT4zubn56e6detqz549Jf51q1q1qho2bOg0r0GDBtbHj6Xl98mBAwe0bNky3Xfffd
a8kvzajR492jra1KRJE91zzz0aMWKEdZS3tLxueQhNxZS7u7tatmyp5cuXW/Nyc3O1fPlyhYWFFWFn9oWGhiowMNBpDJmZmVq/fr01hrCwMKWnp2vjxo1WzYoVK5Sbm6vWrVtbNWvWrNGpU6esmvj4eNWrV08VKlS4av0bYzR06FB98cUXWrFihUJDQ52Wt2zZUmXKlHEa386dO5WUlOQ0vq1btzr9QoiPj5ePj4/1ByIsLMxpHXk11/p1zs3NVXZ2dokfV3h4uLZu3aqEhATr0apVK/Xv39/6d0ke39mysrK0d+9eVa1atcS/bu3atct3S49du3YpODhYUsn/fZJn9uzZ8vf3V1RUlDWvJL92f/31l1xcnKOEq6urcnNzJZWe181yTU87x2WZO3eu8fDwMLGxseaXX34xDzzwgPHz83O6eqKoHTt2zGzatMls2rTJSDLTp083mzZtMgcOHDDGnLnU1M/Pz3z55Zdmy5Yt5o477jjvpaYtWrQw69evN999952pU6eO06Wm6enpJiAgwNxzzz1m27ZtZu7cuaZs2bJX/VLTwYMHG19fX7Nq1SqnS4X/+usvq+ahhx4yNWrUMCtWrDAbNmwwYWFhJiwszFqed5lw586dTUJCgomLizNVqlQ572XCo0ePNr/++qt54403rvplwmPHjjWrV682iYmJZsuWLWbs2LHG4XCYpUuXluhxXcjZV88ZU3LHN2rUKLNq1SqTmJho1q5dayIiIkzlypVNWlpaiR6XMWduD+Hm5maee+45s3v3bvPRRx+ZsmXLmjlz5lg1Jfn3iTFnroCuUaOGefzxx/MtK6mvXXR0tLnuuuusWw58/vnnpnLlymbMmDFWTUl/3c5GaCrmXnvtNVOjRg3j7u5ubrzxRvPDDz8UdUtOVq5caSTle0RHRxtjzlxu+vTTT5uAgADj4eFhwsPDzc6dO53WcfjwYdOvXz/j7e1tfHx8zKBBg8yxY8ecajZv3mzat29vPDw8zHXXXWdeeOGFqz62841Lkpk9e7ZV8/fff5uHH37YVKhQwZQtW9b07NnTJCcnO61n//79pmvXrsbLy8tUrlzZjBo1ypw6dcqpZuXKlaZ58+bG3d3d1KxZ02kbV8O9995rgoODjbu7u6lSpYoJDw+3AlNJHteFnBuaSur4+vTpY6pWrWrc3d3NddddZ/r06eN0H6OSOq48X331lWncuLHx8PAw9evXN++8847T8pL8+8QYY5YsWWIk5evZmJL72mVmZpphw4aZGjVqGE9PT1OzZk3z5JNPOt0aoKS/bmdzGHPWbTsBAABwXpzTBAAAYAOhCQAAwAZCEwAAgA2EJgAAABsITQAAADYQmgAAAGwgNAEAANhAaAJwTezfv18Oh0MJCQlF3UqpEhsbKz8/v6JuA/hHIDQBsM3hcFz0MXHixKJusdjKC415j0qVKqlz587atGmT7XWEhIRoxowZTvP69OmjXbt2FWqvq1atksPhyPcFssA/nVtRNwCg5EhOTrb+PW/ePI0fP97pS1a9vb2Loq1iJScnRw6HI9+XmOZZtmyZGjVqpN9//12PPvqounbtqh07dhT4aJGXl9dV+wZ7AM440gTAtsDAQOvh6+srh8NhTfv7+2v69OmqVq2aPDw81Lx5c8XFxV1wXTk5Obr33ntVv359JSUlSZK+/PJLXX/99fL09FTNmjU1adIknT592nqOw+HQu+++q549e6ps2bKqU6eOFi5caC0/evSo+vfvrypVqsjLy0t16tTR7NmzL9jDzTffrKFDh2ro0KHy9fVV5cqV9fTTT+vsb5fKzs7WY489puuuu07lypVT69attWrVKmt53sdjCxcuVMOGDeXh4WGN53wqVaqkwMBAtWrVSi+//LJSU1O1fv167d27V3fccYcCAgLk7e2tG264QcuWLXPq9cCBAxoxYoR1tOrs7Z/tSvbj/v371alTJ0lShQoV5HA4NHDgwAuOB/hHuebfdgegVJg9e7bx9fW1pqdPn258fHzMJ598Ynbs2GHGjBljypQpY3bt2mWMMSYxMdFIMps2bTInTpwwPXv2NC1atDBpaWnGGGPWrFljfHx8TGxsrNm7d69ZunSpCQkJMRMnTrS2IclUq1bNfPzxx2b37t3m0UcfNd7e3ubw4cPGGGOGDBlimjdvbn766SeTmJho4uPjzcKFCy84ho4dOxpvb28zbNgws2PHDjNnzhxTtmxZpy+Kve+++0zbtm3NmjVrzJ49e8zUqVONh4eHNa7Zs2ebMmXKmLZt25q1a9eaHTt2mOPHj+fb1tnjz/Pzzz8bSWbhwoUmISHBzJo1y2zdutXs2rXLPPXUU8bT09McOHDAGHPmC02rVatmJk+ebJKTk60vcz33dbjS/Xj69Gnzv//9z/pi2eTkZJOenn7R9wLwT0FoAlAg5/6xDgoKMs8995xTzQ033GAefvhhY8z/hYZvv/3WhIeHm/bt2zv9MQ4PDzfPP/+80/P/+9//mqpVq1rTksxTTz1lTWdlZRlJ5ptvvjHGGNO9e3czaNAg22Po2LGjadCggcnNzbXmPf7446ZBgwbGGGMOHDhgXF1dzR9//OH0vPDwcDNu3DhrP0gyCQkJF93WuaHp6NGjpmfPnsbb29ukpKSc9zmNGjUyr732mjUdHBxsXnnlFaeac1+HwtiPK1euNJLM0aNHLzom4J+Gc5oAXLHMzEwdPHhQ7dq1c5rfrl07bd682Wlev379VK1aNa1YscLpXJzNmzdr7dq1eu6556x5OTk5OnHihP766y+VLVtWktS0aVNrebly5eTj46O0tDRJ0uDBg9W7d2/9/PPP6ty5s3r06KG2bdtetPc2bdpYH3VJUlhYmKZNm6acnBxt3bpVOTk5qlu3rtNzsrOzValSJWva3d3dqa+Ladu2rVxcXHT8+HHVrFlT8+bNU0BAgLKysjRx4kQtXrxYycnJOn36tP7++++LftR3PoWxHwGcH6EJwDXVrVs3zZkzR+vWrdMtt9xizc/KytKkSZPUq1evfM/x9PS0/l2mTBmnZQ6HQ7m5uZKkrl276sCBA/r6668VHx+v8PBwDRkyRC+//HKBes3KypKrq6s2btwoV1dXp2Vnn/Tu5eXlFLwuZt68eWrYsKEqVarkdC7SY489pvj4eL388suqXbu2vLy8dOedd+rkyZOX3fOV7kcA50doAnDFfHx8FBQUpLVr16pjx47W/LVr1+rGG290qh08eLAaN26s22+/XYsXL7bqr7/+eu3cuVO1a9e+ol6qVKmi6OhoRUdHq0OHDho9evRFQ9P69eudpn/44QfVqVNHrq6uatGihXJycpSWlqYOHTpcUV95qlevrlq1auWbv3btWg0cOFA9e/aUdCb87N+/36nG3d1dOTk5F11/YexHd3d3SbrktoB/GkITgEIxevRoTZgwQbVq1VLz5s01e/ZsJSQk6KOPPspX+8gjjygnJ0e33XabvvnmG7Vv317jx4/Xbbfdpho1aujOO++Ui4uLNm/erG3btunZZ5+11cP48ePVsmVLNWrUSNnZ2Vq0aJEaNGhw0
eckJSVp5MiRevDBB/Xzzz/rtdde07Rp0yRJdevWVf/+/TVgwABNmzZNLVq00KFDh7R8+XI1bdpUUVFRl7+jLqBOnTr6/PPP1b17dzkcDj399NP5jvyEhIRozZo16tu3rzw8PFS5cuXz7oMr3Y/BwcFyOBxatGiRunXrJi8vL24nAYhbDgAoJI8++qhGjhypUaNGqUmTJoqLi9PChQtVp06d89YPHz5ckyZNUrdu3fT9998rMjJSixYt0tKlS3XDDTeoTZs2euWVVxQcHGy7B3d3d40bN05NmzbVTTfdJFdXV82dO/eizxkwYID+/vtv3XjjjRoyZIiGDRumBx54wFo+e/ZsDRgwQKNGjVK9evXUo0cP/fTTT6pRo4btvuyYPn26KlSooLZt26p79+6KjIzU9ddf71QzefJk7d+/X7Vq1VKVKlXOu57C2I/XXXedJk2apLFjxyogIEBDhw69orEBpYXDmLNuSAIA/yA333yzmjdvnu8u2wBwPhxpAgAAsIHQBAAAYAMfzwEAANjAkSYAAAAbCE0AAAA2EJoAAABsIDQBAADYQGgCAACwgdAEAABgA6EJAADABkITAACADYQmAAAAG/4f3YY+4qjMjKQAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Saving the dataset (1/1 shards): 100%|██████████| 41978/41978 [00:01<00:00, 26285.94 examples/s]\n", + "Saving the dataset (1/1 shards): 100%|██████████| 4665/4665 [00:00<00:00, 19336.65 examples/s]\n", + "Saving the dataset (1/1 shards): 100%|██████████| 4483/4483 [00:00<00:00, 31687.16 examples/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing data for mortality...\n", + "Train size: 34459\n", + "Val size: 3829\n", + "Test size: 6637\n", + "Tokenizing with 12 CPU processes...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Map (num_proc=12): 100%|██████████| 34459/34459 [00:07<00:00, 4543.71 examples/s]\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAk0AAAHHCAYAAACiOWx7AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABVyElEQVR4nO3deVhUZf8/8PewzIDgDAoCTiLgkoobbiGuGSQqmuuTGiUaqflAipJb5dqCaZpLJdkilZpmT5JpoggqpWSK4i5qImg6YCmMoLLevz/8cb4eQT0gxgy9X9c119Oc+3PO+dwzCO/nnDNnVEIIASIiIiJ6IIvqboCIiIjIHDA0ERERESnA0ERERESkAEMTERERkQIMTUREREQKMDQRERERKcDQRERERKQAQxMRERGRAgxNRERERAowNBFV0ty5c6FSqf6RfT399NN4+umnpee7d++GSqXC999//4/sf/To0fDw8PhH9lVZubm5eOWVV+Dq6gqVSoXw8PDHur/S9/+vv/56rPuhijOHn1cyTwxNRACio6OhUqmkh42NDfR6PQICArB8+XLcuHGjSvZz+fJlzJ07FykpKVWyvapkyr0p8d577yE6OhoTJkzAN998g5deeqlMTWnQedjj7oBKVWf06NGy11mr1aJt27ZYvHgx8vPzK7St6v55PXnyJObOnYsLFy5Uy/6pelhVdwNEpmT+/Pnw9PREYWEhDAYDdu/ejfDwcCxZsgSbN29GmzZtpNq33noLM2bMqND2L1++jHnz5sHDwwPe3t6K19uxY0eF9lMZD+rts88+Q0lJyWPv4VEkJCSgc+fOmDNnzn1rhgwZgiZNmkjPc3NzMWHCBAwePBhDhgyRlru4uDzWXv/NNBoNPv/8cwBAdnY2/ve//+H111/HgQMHsH79esXbqe6f15MnT2LevHl4+umneVTrX4Shieguffv2RceOHaXnM2fOREJCAvr374/nnnsOp06dgq2tLQDAysoKVlaP95/QzZs3UatWLajV6se6n4extrau1v0rkZWVBS8vrwfWtGnTRhZ8//rrL0yYMAFt2rTBiy+++LhbrPGEELh9+7b0b6Q8VlZWstf6v//9L3x8fLBhwwYsWbIEer3+kfswh59XMk88PUf0EM888wxmzZqF9PR0rFmzRlpe3jVNcXFx6NatGxwcHGBvb49mzZrhjTfeAHDnOqROnToBAMaMGSOdooiOjgZw57qlVq1aITk5GT169ECtWrWkde+9pqlUcXEx3njjDbi6usLOzg7PPfccLl68KKvx8PDA6NGjy6x79zYf1lt514jk5eUhIiICbm5u0Gg0aNasGT744AMIIWR1KpUKYWFhiImJQatWraDRaNCyZUvExsaW/4LfIysrCyEhIXBxcYGNjQ3atm2Lr776Shovvb4rLS0NW7dulXp/lNMmCQkJ6N69O+zs7ODg4ICBAwfi1KlTD10vPT0dTZo0QatWrZCZmQngztGU8PBw6XVq0qQJ3n//fdmRkAsXLkClUuGDDz7AqlWr0LhxY2g0GnTq1AkHDhyQ7cNgMGDMmDFo0KABNBoN6tevj4EDBz50vqNHj4a9vT3Onz+PgIAA2NnZQa/XY/78+WXes5KSEixduhQtW7aEjY0NXFxcMH78eFy/fl1W5+Hhgf79+2P79u3o2LEjbG1t8emnnz70dbqbhYWF9HN44cIFXLt2Da+//jpat24Ne3t7aLVa9O3bF0eOHJHWqczPa0Xn9Ouvv+Kpp56CjY0NGjVqhK+//lqqiY6Oxn/+8x8AQK9evaT97969u0JzJ/PDI01ECrz00kt44403sGPHDowdO7bcmhMnTqB///5o06YN5s+fD41Gg3PnzmHv3r0AgBYtWmD+/PmYPXs2xo0bh+7duwMAunTpIm3j77//Rt++fTFixAi8+OKLDz1N9O6770KlUmH69OnIysrC0qVL4e/vj5SUlAf+v/17KentbkIIPPfcc9i1axdCQkLg7e2N7du3Y+rUqfjzzz/x4Ycfyup//fVX/PDDD/jvf/+L2rVrY/ny5Rg6dCgyMjLg6Oh4375u3bqFp59+GufOnUNYWBg8PT2xceNGjB49GtnZ2Zg0aRJatGiBb775BpMnT0aDBg0QEREBAKhXr57i+d9t586d6Nu3Lxo1aoS5c+fi1q1bWLFiBbp27YpDhw7d91TMH3/8gWeeeQZ169ZFXFwcnJyccPPmTfTs2RN//vknxo8fj4YNG2Lfvn2YOXMmrly5gqVLl8q2sW7dOty4cQPjx4+HSqXCwoULMWTIEJw/f146ejJ06FCcOHECr732Gjw8PJCVlYW4uDhkZGQ89DRRcXEx+vTpg86dO2PhwoWIjY3FnDlzUFRUhPnz50t148ePR3R0NMaMGYOJEyciLS0NH330EQ4fPoy9e/fKjuSkpqZi5MiRGD9+PMaOHYtmzZpV+DX/448/AACOjo44f/48YmJi8J///Aeenp7IzMzEp59+ip49e+LkyZPQ6/UV/nmt6JzOnTuHYcOGISQkBMHBwfjyyy8xevRodOjQAS1btkSPHj0wceJELF++HG+88QZatGgBANL/Ug0miEisXr1aABAHDhy4b41OpxPt2rWTns+ZM0fc/U/oww8/FADE1atX77uNAwcOCABi9erVZcZ69uwpAIioqKhyx3r27Ck937VrlwAgnnjiCWE0Gq
[... several thousand characters of base64-encoded PNG data omitted: the token-length distribution plot produced by tokenize_plot for the mortality split ...]", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Saving the dataset (1/1 shards): 100%|██████████| 34459/34459 [00:01<00:00, 20791.68 examples/s] \n", + "Saving the dataset (1/1 shards): 100%|██████████| 3829/3829 [00:00<00:00, 23462.47 examples/s]\n", + "Saving the dataset (1/1 shards): 100%|██████████| 6637/6637 [00:00<00:00, 26327.15 examples/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing data for ckd2345_2022_nlp...\n", + "Train size: 18577\n", + "Val size: 2065\n", + "Test size: 2887\n", + "Tokenizing with 12 CPU processes...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Map (num_proc=12): 100%|██████████| 18577/18577 [00:03<00:00, 5233.59 examples/s] \n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAk0AAAHHCAYAAACiOWx7AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABKTElEQVR4nO3deVhU5f//8dcAsigCboCkIqm575bikiUkKVpun7IolyytIHdNK9cWS9NcKm0V+2hp9ikzTRT3MjNz13JfSwFLAbHc4P794Zfzc8TliBhLz8d1zZVzn/ec875nBnh15pwzDmOMEQAAAK7JJbcbAAAAyA8ITQAAADYQmgAAAGwgNAEAANhAaAIAALCB0AQAAGADoQkAAMAGQhMAAIANhCYAAAAbCE1ANo0cOVIOh+Mf2dY999yje+65x7q/cuVKORwOffHFF//I9rt166by5cv/I9vKrrS0ND355JMKDAyUw+FQ3759b+n2Ml//P/7445ZuBzcuP7xfkT8RmgBJsbGxcjgc1s3T01NBQUGKiIjQ5MmTderUqRzZztGjRzVy5Eht3rw5R9aXk/Jyb3a89tprio2N1TPPPKP//ve/evzxx7PUZAad690uDajIOd26dXN6nn18fFS7dm2NHz9eZ8+evaF15fb79ZdfftHIkSN18ODBXNk+codbbjcA5CWjR49WSEiIzp8/r4SEBK1cuVJ9+/bVhAkTNH/+fNWqVcuqfemllzRkyJAbWv/Ro0c1atQolS9fXnXq1LH9uCVLltzQdrLjWr198MEHysjIuOU93Izly5erUaNGGjFixFVrOnTooIoVK1r309LS9Mwzz6h9+/bq0KGDNR4QEHBLe/038/Dw0IcffihJSk5O1v/+9z8NHDhQ69ev1+zZs22vJ7ffr7/88otGjRqle+65h71a/yKEJuASrVq1UoMGDaz7Q4cO1fLly9WmTRs98MAD+vXXX+Xl5SVJcnNzk5vbrf0R+uuvv1S4cGG5u7vf0u1cT6FChXJ1+3YkJSWpWrVq16ypVauWU/D9448/9Mwzz6hWrVp67LHHbnWLBZ4xRmfOnLF+Rq7Ezc3N6bl+9tln1bBhQ82ZM0cTJkxQUFDQTfeRH96vyJ/4eA64jhYtWmjYsGE6dOiQZs6caY1f6Zim+Ph4NW3aVH5+fvL29lblypX1wgsvSLp4HNKdd94pSerevbv1EUVsbKyki8ct1ahRQxs2bNDdd9+twoULW4+9/JimTOnp6XrhhRcUGBioIkWK6IEHHtCRI0ecasqXL69u3bpleeyl67xeb1c6RuT06dMaMGCAypYtKw8PD1WuXFlvvvmmjDFOdQ6HQzExMZo3b55q1KghDw8PVa9eXXFxcVd+wi+TlJSkHj16KCAgQJ6enqpdu7ZmzJhhLc88vuvAgQNauHCh1fvNfGyyfPlyNWvWTEWKFJGfn58efPBB/frrr9d93KFDh1SxYkXVqFFDiYmJki7uTenbt6/1PFWsWFFvvPGG056QgwcPyuFw6M0339T777+vChUqyMPDQ3feeafWr1/vtI2EhAR1795dZcqUkYeHh0qXLq0HH3zwuvPt1q2bvL29tX//fkVERKhIkSIKCgrS6NGjs7xmGRkZmjhxoqpXry5PT08FBASoV69eOnnypFNd+fLl1aZNGy1evFgNGjSQl5eX3nvvves+T5dycXGx3ocHDx7UiRMnNHDgQNWsWVPe3t7y8fFRq1attGXLFusx2Xm/3uicvv/+e911113y9PTU7bffrk8++cSqiY2N1X/+8x9J0r333mttf+XKlTc0d+Q/7GkCbHj88cf1wgsvaMmSJXrqqaeuWLNjxw61adNGtWrV0ujRo+Xh4aG9e/dqzZo1kqSqVatq9OjRGj58uHr27KlmzZpJkho3bmyt488//1SrVq3UuXNnPfbYY9f9mOjVV1+Vw+HQ888/r6SkJE2cOFHh4eHavHnzNf9v/3J2eruUMUYPPPCAVqxYoR49eqhOnTpavHixBg0apN9//11vvfWWU/3333+vL7/8Us8++6yKFi2qyZMnq2PHjjp8+LBKlChx1b7+/vtv3XPPPdq7d69iYmIUEhKiuXPnqlu3bkpOTlafPn1UtWpV/fe//1W/fv1UpkwZDRgwQJJUqlQp2/O/1NKlS9WqVSvdfvvtGjlypP7++29NmTJFTZo00caNG6/6Ucy+ffvUokULFS9eXPHx8SpZsqT++usvNW/eXL///rt69eqlcuXK6YcfftDQoUN17NgxTZw40Wkdn376qU6dOqVevXrJ4XBo7Nix6tChg/bv32/tPenYsaN27Nih5557TuXLl1dSUpLi4+N1+PDh635MlJ6ervvvv1+NGjXS2LFjFRcXpxEjRujChQsaPXq0VderVy/Fxsaqe/fu6t27tw4cOKC3335bmzZt0po1a5z25OzatUuPPPKIevXqpaeeekqVK1e+4ed83759kqQSJUpo//79mjdvnv7zn/8oJCREiYmJeu+999S8eXP98ssvCgoKuuH3643Oae/everUqZN69Oihrl276uOPP1a3bt1Uv359Va9eXXfffbd69+6tyZMn64UXXlDVqlUlyfovCjADwEyfPt1IMuvXr79qja+vr6lbt651f8SIEebSH6G33nrLSDLHjx+/6jrWr19vJJnp06dnWda8eXMjyUybN
[... several thousand characters of base64-encoded PNG data omitted: the token-length distribution plot produced by tokenize_plot for the ckd2345_2022_nlp split ...]", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Saving the dataset (1/1 shards): 100%|██████████| 18577/18577 [00:01<00:00, 11427.14 examples/s]\n", + "Saving the dataset (1/1 shards): 100%|██████████| 2065/2065 [00:00<00:00, 14968.99 examples/s]\n", + "Saving the dataset (1/1 shards): 100%|██████████| 2887/2887 [00:00<00:00, 11546.38 examples/s]\n" + ] + } + ], + "source": [ + "for condition in conditions:\n", + " \n", + " # Load dataset\n", + " print(f\"Processing data for {condition}...\")\n", + " dataset_name = f\"{time_slice}_{condition}_ids_text_feats_concatenated.hf\"\n", + " d = load_from_disk(os.path.join(data_dir, dataset_name))\n", + " \n", + " # Convert to dataframes and create validation split\n", + " train_df = d['train'].to_pandas()\n", + " test_df = d['test'].to_pandas()\n", + " train_df, val_df = train_test_split(train_df, test_size=val_size, random_state=seed, stratify=train_df['label'].values)\n", + " \n", + " # Select text, and label columns\n", + " train_df, val_df, test_df = train_df[cols], val_df[cols], test_df[cols]\n", + " \n", + " # Remove duplicate notes\n", + " train_df['text'] = train_df['text'].apply(remove_duplicates_join_on_sep)\n", + " val_df['text'] = val_df['text'].apply(remove_duplicates_join_on_sep)\n", + " test_df['text'] = test_df['text'].apply(remove_duplicates_join_on_sep)\n", + " \n", + " # Count data split sizes\n", + " print(f\"Train size: {train_df.shape[0]}\")\n", + " print(f\"Val size: {val_df.shape[0]}\")\n", + " print(f\"Test size: {test_df.shape[0]}\")\n", + " \n", + " # Create new HF datasets\n", + " train_dataset = Dataset.from_pandas(train_df) \n", + " val_dataset = Dataset.from_pandas(val_df) \n", + " test_dataset = Dataset.from_pandas(test_df)\n", + " \n", + " # View token length distribution\n", + " tokenize_plot(train_dataset, tokenizer, truncate_to=max_seq_len)\n", + " \n", + " # Create new dataset dict\n", + " dataset_dict = { \n", + " \"train\": train_dataset, \n", + " \"val\": val_dataset, \n", + " \"test\": test_dataset \n", + " } \n", + " dataset_dict = DatasetDict(dataset_dict) \n", + " \n", + " # Save dataset dict\n", + " new_dataset_name = f\"{time_slice}_{condition}_text_label.hf\"\n", + " dataset_dict.save_to_disk(os.path.join(data_dir, new_dataset_name))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bc81ceba-a91a-4e63-bac6-2c4d33b46a88", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.10 - SDK v2", + "language": "python", + "name": "python310-sdkv2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/long_roberta/requirements.txt b/long_roberta/requirements.txt new file mode 100644 index 0000000..92de230 --- /dev/null +++ b/long_roberta/requirements.txt @@ -0,0 +1,13 @@ +transformers==4.34.0 +tokenizers==0.14.1 +PyYAML +sentencepiece +scikit-learn +pandas +datasets +protobuf +matplotlib +torch==1.13.1 +accelerate +pyarrow +pytest \ No newline at end of file diff --git a/long_roberta/run_exps.py b/long_roberta/run_exps.py new file mode 100644 index 0000000..4639542 --- /dev/null +++ b/long_roberta/run_exps.py @@ -0,0 +1,88 @@ +import os +import subprocess +import yaml + +def 
update_params_yaml( + params, + dataset_path, + output_path, + model_name, + pooling_strategy, + tokenizer_path, + bert_path + ): + params['dataset_path'] = dataset_path + params['output_path'] = output_path + params['model_name'] = model_name + params['pooling_strategy'] = pooling_strategy + params['tokenizer_path'] = tokenizer_path + params['bert_path'] = bert_path + + with open('params.yml', 'w') as params_file: + yaml.dump(params, params_file) + +def run_train_and_evaluate(): + subprocess.run(['python', 'train_and_evaluate.py']) + +# Load the params.yml file +with open('params.yml', 'r') as params_file: + params = yaml.safe_load(params_file) + +# Set tokenizer_path and bert_path +tokenizer_path = 'roberta_v2/' +bert_path = 'checkpoint-500000/' + +datasets = [ + '_text_label.hf', + '_text_label.hf', + '_text_label.hf', + '_text_label.hf', + '_text_label.hf', + '_text_label.hf' +] +output_paths = [ + '_text_only/', + '_text_only_mean/', + '_text_only/', + '_text_only_mean/', + '_text_only/', + '_text_only_mean/' +] +model_names = [ + "_text_only", + "_text_only_mean", + "_text_only", + "_text_only_mean", + "_text_only", + "_text_only_mean" +] +pooling_strategies = [ + "max", + "mean", + "max", + "mean", + "max", + "mean" +] + +assert len(datasets) == len(output_paths) == len(model_names) == len(pooling_strategies), "Error in param lists." + +# Iterate through the different values and run train_and_evaluate.py +for dataset, output_path, model_name, pooling_strategy in zip(datasets, output_paths, model_names, pooling_strategies): + + # Update params.yml file with new values + update_params_yaml( + params, + dataset, + output_path, + model_name, + pooling_strategy, + tokenizer_path, + bert_path + ) + + # Create output directory if it doesn't exist + os.makedirs(output_path, exist_ok=True) + + # Run the train_and_evaluate.py script + run_train_and_evaluate() diff --git a/long_roberta/text_preprocessors.py b/long_roberta/text_preprocessors.py new file mode 100644 index 0000000..a5ace0c --- /dev/null +++ b/long_roberta/text_preprocessors.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# coding: utf-8 + +""" +Classes and functions for tokenization and text preprocessing +""" + +import numpy as np +import torch +from pooling import transform_text_to_model_input + + +class Preprocessor(): + ''' + An abstract class for text preprocesssors. 
diff --git a/long_roberta/text_preprocessors.py b/long_roberta/text_preprocessors.py
new file mode 100644
index 0000000..a5ace0c
--- /dev/null
+++ b/long_roberta/text_preprocessors.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+"""
+Classes and functions for tokenization and text preprocessing
+"""
+
+import numpy as np
+import torch
+from pooling import transform_text_to_model_input
+
+
+class Preprocessor():
+    '''
+    An abstract class for text preprocessors.
+    A Preprocessor takes an array of strings and transforms it into an array of data compatible with the model.
+    '''
+
+    def __init__(self):
+        pass
+
+    def preprocess(self, array_of_texts):
+        raise NotImplementedError(
+            "Preprocessing is implemented for subclasses only")
+
+
+class BERTTokenizer(Preprocessor):
+    def __init__(self, tokenizer):
+        self.tokenizer = tokenizer
+
+    def preprocess(self, array_of_texts):
+        tokens = tokenize(array_of_texts, self.tokenizer)
+        return tokens
+
+
+class BERTTokenizerPooled(Preprocessor):
+    def __init__(self, tokenizer, size, step, minimal_length, max_num_segments):
+        self.tokenizer = tokenizer
+        self.text_splits_params = [size, step, minimal_length, max_num_segments]
+
+    def preprocess(self, array_of_texts):
+        array_of_preprocessed_data = tokenize_pooled(
+            array_of_texts, self.tokenizer, *self.text_splits_params)
+        return array_of_preprocessed_data
+
+
+def tokenize(texts, tokenizer):
+    '''
+    Transforms a list of texts into a batch of tokens (truncated to 512 tokens per text)
+
+    Parameters:
+    texts - list of strings
+    tokenizer - object of class transformers.PreTrainedTokenizerFast
+
+    Returns:
+    tokens - BatchEncoding with input_ids and attention_mask, one row per text
+    '''
+
+    texts = list(texts)
+    tokenizer.pad_token = ""
+    tokens = tokenizer.batch_encode_plus(
+        texts,
+        max_length=512,
+        padding=True,
+        truncation=True,
+        return_tensors='pt')
+
+    return tokens
+
+
+def tokenize_pooled(texts, tokenizer, size, step, minimal_length, max_num_segments):
+    '''
+    Tokenizes texts and splits them into overlapping chunks of up to 512 tokens
+
+    Parameters:
+    texts - list of strings
+    tokenizer - object of class transformers.PreTrainedTokenizerFast
+    size - size of text chunk to tokenize (must be <= 510)
+    step - stride between consecutive chunks
+    minimal_length - minimal length of a text chunk
+    max_num_segments - maximum number of chunks kept per text
+
+    Returns:
+    tokens - dict with per-text lists of input_ids and attention_mask segments
+    '''
+    model_inputs = [
+        transform_text_to_model_input(
+            text,
+            tokenizer,
+            size,
+            step,
+            minimal_length) for text in texts]
+
+    input_ids = [model_input[0][0:max_num_segments] for model_input in model_inputs]
+    attention_mask = [model_input[1][0:max_num_segments] for model_input in model_inputs]
+
+    tokens = {'input_ids': input_ids, 'attention_mask': attention_mask}
+    return tokens
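`tokenize_pooled` delegates the actual overlapping split to `pooling.transform_text_to_model_input`, which is not part of this patch, so the sketch below only illustrates the windowing arithmetic it relies on (window size, stride, minimal length, and the `max_num_segments` cap). The token counts and parameter values are made-up examples, not the real implementation.

```python
# Rough illustration of overlapping-window chunking; the real logic lives in
# pooling.transform_text_to_model_input and may differ in detail.
def approximate_num_segments(num_tokens, size=510, step=128, minimal_length=1, max_num_segments=None):
    # Windows start every `step` tokens; each keeps at most `size` tokens.
    lengths = [min(size, num_tokens - start) for start in range(0, max(num_tokens, 1), step)]
    kept = [length for length in lengths if length >= minimal_length]
    return min(len(kept), max_num_segments) if max_num_segments is not None else len(kept)

# A hypothetical 2000-token note under two stride settings:
print(approximate_num_segments(2000, size=510, step=128, max_num_segments=128))  # 16 windows
print(approximate_num_segments(2000, size=510, step=448, max_num_segments=5))    # 5 windows (capped)
```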
diff --git a/long_roberta/train_and_evaluate.py b/long_roberta/train_and_evaluate.py
new file mode 100644
index 0000000..ed2b560
--- /dev/null
+++ b/long_roberta/train_and_evaluate.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Open imports
+import os
+import json
+import yaml
+import logging
+import pickle
+import numpy as np
+import pandas as pd
+from datasets import load_from_disk
+
+# Project imports
+from main import BERTClassificationModelWithPooling, BERTClassificationModel
+from utils import check_empty_count_gpus, create_current_run, np_sigmoid, load_and_split_imdb_data, plot_learning_curve
+from metrics import BootstrapMultiLabelMetrics
+
+# Load run parameters
+with open("params.yml", "r") as stream:
+    PARAMS = yaml.safe_load(stream)
+
+# Define logger
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Log parameters
+logger.info(PARAMS)
+
+# Set visible GPUs before any CUDA initialization
+os.environ["CUDA_VISIBLE_DEVICES"] = PARAMS['visible_gpus']
+
+# Check, empty, and count GPUs
+check_empty_count_gpus(logger=logger)
+
+# Create run directory
+current_run_dir = create_current_run(save_path=PARAMS['output_path'], params=PARAMS, logger=logger)
+logger.info(f"Created run directory: {current_run_dir}.")
+
+# Set run name
+run_name = current_run_dir.rstrip('/').split('/')[-1]
+logger.info(f"Starting run {run_name}...")
+
+# Load data
+if PARAMS['test_with_imdb_data']:
+
+    # Use IMDB data to test model
+    X_train, X_val, X_test, y_train, y_val, y_test = load_and_split_imdb_data(PARAMS['imdb_data'], num_labels=PARAMS['num_labels'])
+
+else:
+
+    # Load real data
+    d = load_from_disk(PARAMS['dataset_path'])
+    X_train = np.array(d['train']['text'])
+    y_train = np.array(d['train']['label']).reshape(-1, 1)
+    X_val = np.array(d['val']['text'])
+    y_val = np.array(d['val']['label']).reshape(-1, 1)
+    X_test = np.array(d['test']['text'])
+    y_test = np.array(d['test']['label']).reshape(-1, 1)
+
+# Print data shapes
+logger.info(f'Train shapes: {len(X_train), y_train.shape}')
+logger.info(f'Val shapes: {len(X_val), y_val.shape}')
+logger.info(f'Test shapes: {len(X_test), y_test.shape}')
+
+# Load model
+if PARAMS['use_pooled_bert']:
+    model = BERTClassificationModelWithPooling()
+else:
+    model = BERTClassificationModel()
+
+# Train and evaluate
+result = model.train_and_evaluate(
+    X_train,
+    X_val,
+    X_test,
+    y_train,
+    y_val,
+    y_test,
+    epochs=PARAMS['epochs'],
+    early_stopping_epochs=PARAMS['early_stopping_epochs'],
+    logger=logger
+)
+
+# Find best epoch by validation loss
+best_epoch = np.argmin(result['val_loss'])
+logger.info(f'Val losses: {result["val_loss"]}.')
+logger.info(f'Best val loss: {np.min(result["val_loss"])}.')
+logger.info(f'Best epoch: {best_epoch}.')
+
+# Get test preds from the best epoch
+test_preds = np.array(result['test_preds'][best_epoch])
+test_labels = np.array(result['test_labels'][best_epoch])
+
+# Save final preds and labels
+with open(f"./{PARAMS['model_name']}_scores.pkl", "wb") as f:
+    pickle.dump(test_preds, f)
+with open(f"./{PARAMS['model_name']}_labels.pkl", "wb") as f:
+    pickle.dump(test_labels, f)
+
+# Compute final performance
+evaluator = BootstrapMultiLabelMetrics(labels=test_labels, preds=test_preds)
+metrics_dict = evaluator.get_all_bootstrapped_metrics_as_dict(n_bootstrap=1000)
+logger.info(metrics_dict)
+
+# Save metrics
+with open(current_run_dir + 'metrics.json', "w") as f:
+    json.dump(metrics_dict, f)
+with open(f'./{PARAMS["model_name"]}_metrics.json', "w") as f:
+    json.dump(metrics_dict, f)
+
+# Plot learning curves from training (exclude per-epoch test artifacts)
+nresult = {k: v for k, v in result.items() if 'test' not in k}
+plot_learning_curve(nresult, current_run_dir, prefix=PARAMS['model_name'])
+plot_learning_curve(nresult, './', prefix=PARAMS['model_name'])
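The best-epoch logic in `train_and_evaluate.py` assumes `model.train_and_evaluate` returns a dict of per-epoch lists. A minimal illustration of that assumed shape follows; the numbers are invented, and only the `val_loss`, `test_preds`, and `test_labels` keys are taken from how the script indexes `result`.

```python
import numpy as np

# Invented values showing the per-epoch structure the script relies on.
result = {
    "val_loss":    [0.58, 0.44, 0.40, 0.42],   # one entry per epoch
    "test_preds":  [[[0.2], [0.7]]] * 4,       # per-epoch test probabilities
    "test_labels": [[[0], [1]]] * 4,           # per-epoch test labels
}

best_epoch = np.argmin(result["val_loss"])      # -> 2, the epoch with the lowest val loss
test_preds = np.array(result["test_preds"][best_epoch])
test_labels = np.array(result["test_labels"][best_epoch])
assert test_preds.shape == test_labels.shape == (2, 1)
```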
diff --git a/long_roberta/utils.py b/long_roberta/utils.py
new file mode 100644
index 0000000..dfbbbc7
--- /dev/null
+++ b/long_roberta/utils.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+"""
+Utility functions for deep learning experiments
+"""
+
+import os
+import yaml
+import torch
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+from sklearn.model_selection import train_test_split
+
+
+def create_current_run(save_path, params, logger=None):
+    """
+    Create a directory for the current run, save the
+    current pipeline parameters, and return the
+    path to the current run directory.
+    """
+
+    # Create current run dir
+    src_dirs = os.listdir(save_path)
+    max_run = max([int(dir.split('_')[1]) for dir in src_dirs]) if len(src_dirs) > 0 else -1
+    current_run_dir = os.path.join(save_path, 'run_' + str(max_run + 1) + '/')
+    os.makedirs(current_run_dir)
+
+    if logger:
+        logger.info(f'Created current run dir: {current_run_dir}.')
+
+    # Save run params in current run dir for reference
+    with open(os.path.join(current_run_dir, 'params.yml'), 'w') as stream:
+        yaml.dump(params, stream, default_flow_style=False)
+
+    if logger:
+        logger.info('Saved run parameters to current run dir.')
+
+    return current_run_dir
+
+def check_empty_count_gpus(logger=None):
+    """
+    Check that GPU is available, empty the cache,
+    and count the number of available devices.
+    """
+
+    # Check that a GPU is available:
+    assert torch.cuda.is_available(), 'No GPU found. Please run on a GPU.'
+
+    # Empty GPU cache
+    torch.cuda.empty_cache()
+
+    # Count available devices
+    device_count = torch.cuda.device_count()
+
+    if logger:
+        logger.info(f'Found {device_count} GPU(s)!')
+
+def np_sigmoid(z):
+    """
+    Convert logits to probabilities:
+    https://en.wikipedia.org/wiki/Sigmoid_function.
+    """
+
+    return 1 / (1 + np.exp(-z))
+
+def to_binary_one_hot(y):
+    """
+    Convert 0 and 1 labels to
+    [1, 0] and [0, 1] for generality
+    """
+
+    yn = np.zeros((len(y), 2), dtype=int)
+    for i, val in enumerate(y):
+        yn[i, 0] = 1 - val  # 0 -> 1 & 1 -> 0
+        yn[i, 1] = val      # 0 -> 0 & 1 -> 1
+
+    return yn
+
+def load_and_split_imdb_data(path, seed=42, num_labels=1):
+    """
+    Load and split IMDB data for testing code.
+    """
+
+    # Read data and create features and labels
+    df = pd.read_csv(path)
+    texts = df['sentence'].tolist()
+    labels = df['target'].tolist()
+
+    # Create train, val, test splits
+    X_train, X_test, y_train, y_test = train_test_split(texts, labels, test_size=.15, random_state=seed, shuffle=True)
+    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=.15, random_state=seed, shuffle=False)
+
+    # Convert binary labels to one hot labels for generality or use a single binary label wrapped in an extra dim
+    if num_labels == 1:
+        y_train = np.array([[x] for x in y_train])
+        y_val = np.array([[x] for x in y_val])
+        y_test = np.array([[x] for x in y_test])
+    elif num_labels == 2:
+        y_train = to_binary_one_hot(y_train)
+        y_val = to_binary_one_hot(y_val)
+        y_test = to_binary_one_hot(y_test)
+    else:
+        raise ValueError("For this dataset, the labels should be encoded using either 1 or 2 columns.")
+
+    return X_train, X_val, X_test, y_train, y_val, y_test
+
+def plot_learning_curve(result, current_run_dir, prefix):
+    """
+    Plot each metric list in the result dict as a learning curve and save the figure.
+    """
+
+    cmap = plt.get_cmap("tab10")
+    fig, ax = plt.subplots(figsize=(10, 10))
+
+    for i, (key, value) in enumerate(result.items()):
+        ax.plot(value, '-', label=key, color=cmap(i))
+    ax.legend()
+
+    plt.legend(loc='upper right')
+    plt.xlabel('Epoch')
+    plt.ylabel('Value')
+    plt.title('Learning Curves')
+    plt.tight_layout()
+    plt.savefig(current_run_dir + f'{prefix}_learning_curves.png', transparent=False)
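Since `requirements.txt` already pins `pytest`, a small sanity test for the helpers in `utils.py` could sit alongside these scripts. The file below is a hypothetical sketch (`test_utils.py` is not part of the patch).

```python
# Hypothetical test_utils.py: minimal checks for the label/probability helpers in utils.py.
import numpy as np
from utils import np_sigmoid, to_binary_one_hot

def test_np_sigmoid_matches_definition():
    z = np.array([-2.0, 0.0, 3.0])
    expected = 1 / (1 + np.exp(-z))
    assert np.allclose(np_sigmoid(z), expected)
    assert np.isclose(np_sigmoid(0.0), 0.5)

def test_to_binary_one_hot_encodes_both_columns():
    y = [0, 1, 1, 0]
    encoded = to_binary_one_hot(y)
    assert encoded.shape == (4, 2)
    # column 0 is 1 - y, column 1 is y, and each row sums to one
    assert (encoded[:, 1] == np.array(y)).all()
    assert (encoded.sum(axis=1) == 1).all()
```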